| author | Jesse Barnes <jbarnes@virtuousgeek.org> | 2008-08-16 11:45:53 -0700 |
|---|---|---|
| committer | Jesse Barnes <jbarnes@virtuousgeek.org> | 2008-08-16 11:45:53 -0700 |
| commit | d313108167a793652a5fe4c1015198e0a9deac4c (patch) | |
| tree | 70702ea339cdcec621a8f7251c673c80a3acb73e | |
| parent | 893315d49ed678de95cf6ac553efb6093cc7343c (diff) | |
| parent | 2030db75328b7d896a5dd030fc171020b33149e1 (diff) | |
Merge branch 'modesetting-gem' of ssh://git.freedesktop.org/git/mesa/drm into modesetting-gem
46 files changed, 5426 insertions, 4429 deletions
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index e2dd9dc7..15da398f 100644
--- a/libdrm/intel/intel_bufmgr_fake.c
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -653,7 +653,7 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
 	bo_fake->refcount = 1;
 	bo_fake->id = ++bufmgr_fake->buf_nr;
 	bo_fake->name = name;
-	bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+	bo_fake->flags = BM_PINNED;
 	bo_fake->is_static = 1;
 
 	DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 44c7a8d2..b370e015 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,17 +12,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
 		drm_memory_debug.o ati_pcigart.o drm_sman.o \
-		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
-		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
+		drm_hashtab.o drm_mm.o drm_compat.o \
+		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
 		drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
 		drm_regman.o drm_vm_nopage_compat.o drm_gem.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
-i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
-		i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
-		i915_opregion.o \
+i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+		i915_compat.o i915_suspend.o i915_opregion.o \
 		i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \
 		intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
 		intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
@@ -43,8 +42,9 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nv50_kms_wrapper.o \
 		nv50_fbcon.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_gem.o \
-	radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o \
-	atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o
+	radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o radeon_cs.o \
+	atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o radeon_legacy_crtc.o radeon_legacy_encoders.o \
+	radeon_cursor.o
 sis-objs    := sis_drv.o sis_mm.o
 ffb-objs    := ffb_drv.o ffb_context.o
 savage-objs := savage_drv.o savage_bci.o savage_state.o
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 3aa445e8..9b954291 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -90,7 +90,7 @@ int drm_ati_alloc_pcigart_table(struct drm_device *dev,
 	if (gart_info->table_handle == NULL)
 		return -ENOMEM;
 
-	memset(gart_info->table_handle, 0, gart_info->table_size);
+	memset(gart_info->table_handle->vaddr, 0, gart_info->table_size);
 	return 0;
 }
 EXPORT_SYMBOL(drm_ati_alloc_pcigart_table);
@@ -111,7 +111,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
 	/* we need to support large memory configurations */
 	if (!entry) {
-		DRM_ERROR("no scatter/gather memory!\n");
 		return 0;
 	}
@@ -206,11 +205,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	ret = 1;
 
-#if defined(__i386__) || defined(__x86_64__)
-	wbinvd();
-#else
 	mb();
-#endif
 
 done:
 	gart_info->addr = address;
@@ -266,11 +261,7 @@ static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
 		gart_insert_page_into_table(info, page_base, pci_gart + j);
 	}
 
-#if defined(__i386__) || defined(__x86_64__)
-	wbinvd();
-#else
 	mb();
-#endif
 
 	atipci_be->gart_flush_fn(atipci_be->dev);
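The ati_pcigart hunks above drop the x86-only wbinvd() full cache flush in favor of a plain memory barrier. A minimal sketch of the resulting publish pattern, with hypothetical names standing in for the driver's real table-write loop and flush callback:

```c
/* Illustrative only: a GART table kept in coherent memory needs write
 * ordering, not a CPU cache flush, before the GPU is told to re-read it. */
static void gart_publish(u32 *gart_table, const u32 *entries, int n,
			 void (*gpu_flush)(void *dev), void *dev)
{
	int i;

	for (i = 0; i < n; i++)
		gart_table[i] = entries[i];	/* fill the table */

	mb();			/* order table writes before the flush kick */
	gpu_flush(dev);		/* have the GPU re-read the updated table */
}
```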
diff --git a/linux-core/atombios_crtc.c b/linux-core/atombios_crtc.c
index 1f372045..0a86f36b 100644
--- a/linux-core/atombios_crtc.c
+++ b/linux-core/atombios_crtc.c
@@ -31,6 +31,22 @@
 #include "atom.h"
 #include "atom-bits.h"
 
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = lock;
+
+	atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
 static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -127,7 +143,7 @@ void atombios_crtc_set_timing(struct drm_crtc *crtc, SET_CRTC_TIMING_PARAMETERS_
 	conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
 	conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
 	conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
-	conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
+	conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
 	conv_param.ucReserved = crtc_param->ucReserved;
 
 	printk("executing set crtc timing\n");
@@ -150,29 +166,21 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 	memset(&spc_param, 0, sizeof(SET_PIXEL_CLOCK_PS_ALLOCATION));
 
-	if (radeon_is_avivo(dev_priv)) {
-		uint32_t temp;
+	pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-		pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+	radeon_compute_pll(&dev_priv->mode_info.pll, mode->clock,
+			   &sclock, &fb_div, &ref_div, &post_div, pll_flags);
 
-		radeon_compute_pll(&dev_priv->mode_info.pll, mode->clock,
-				   &temp, &fb_div, &ref_div, &post_div, pll_flags);
-		sclock = temp;
+	if (radeon_is_avivo(dev_priv)) {
+		uint32_t ss_cntl;
 
 		if (radeon_crtc->crtc_id == 0) {
-			temp = RADEON_READ(AVIVO_P1PLL_INT_SS_CNTL);
-			RADEON_WRITE(AVIVO_P1PLL_INT_SS_CNTL, temp & ~1);
+			ss_cntl = RADEON_READ(AVIVO_P1PLL_INT_SS_CNTL);
+			RADEON_WRITE(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
 		} else {
-			temp = RADEON_READ(AVIVO_P2PLL_INT_SS_CNTL);
-			RADEON_WRITE(AVIVO_P2PLL_INT_SS_CNTL, temp & ~1);
+			ss_cntl = RADEON_READ(AVIVO_P2PLL_INT_SS_CNTL);
+			RADEON_WRITE(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
 		}
-	} else {
-#if 0 // TODO r400
-		sclock = save->dot_clock_freq;
-		fb_div = save->feedback_div;
-		post_div = save->post_div;
-		ref_div = save->ppll_ref_div;
-#endif
 	}
 
 	/* */
@@ -201,7 +209,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		spc3_ptr->ucPostDiv = post_div;
 		spc3_ptr->ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
 		spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
-
+		/* TODO insert output encoder object stuff here for r600 */
 		break;
 	default:
@@ -220,7 +228,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
 {
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_radeon_private *dev_priv = dev->dev_private;
 	struct radeon_framebuffer *radeon_fb;
@@ -251,19 +259,17 @@ void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
 		DRM_ERROR("Unsupported screen depth %d\n", crtc->fb->bits_per_pixel);
 		return;
 	}
-
+	/* TODO tiling */
 	if (radeon_crtc->crtc_id == 0)
 		RADEON_WRITE(AVIVO_D1VGA_CONTROL, 0);
 	else
 		RADEON_WRITE(AVIVO_D2VGA_CONTROL, 0);
-
-	RADEON_WRITE(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, AVIVO_D1GRPH_UPDATE_LOCK);
-
+
 	RADEON_WRITE(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location);
 	RADEON_WRITE(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location);
 	RADEON_WRITE(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
-
+
 	RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
 	RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
 	RADEON_WRITE(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, x);
@@ -274,20 +280,13 @@ void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
 	fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
 	RADEON_WRITE(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
 	RADEON_WRITE(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
-
-	/* unlock the grph regs */
-	RADEON_WRITE(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, 0);
-
-	/* lock the mode regs */
-	RADEON_WRITE(AVIVO_D1SCL_UPDATE + radeon_crtc->crtc_offset, AVIVO_D1SCL_UPDATE_LOCK);
-
+
 	RADEON_WRITE(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
 		     crtc->mode.vdisplay);
 	RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y);
 	RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 		     (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
-	/* unlock the mode regs */
-	RADEON_WRITE(AVIVO_D1SCL_UPDATE + radeon_crtc->crtc_offset, 0);
+
 }
 
 void atombios_crtc_mode_set(struct drm_crtc *crtc,
@@ -324,7 +323,7 @@ void atombios_crtc_mode_set(struct drm_crtc *crtc,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
-
+
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 		crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
@@ -337,9 +336,8 @@ void atombios_crtc_mode_set(struct drm_crtc *crtc,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
 
-	if (radeon_is_avivo(dev_priv)) {
+	if (radeon_is_avivo(dev_priv))
 		atombios_crtc_set_base(crtc, x, y);
-	}
 
 	atombios_crtc_set_pll(crtc, adjusted_mode, pll_flags);
 
@@ -357,11 +355,13 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	atombios_lock_crtc(crtc, 1);
 }
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	atombios_lock_crtc(crtc, 0);
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
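The new atombios_lock_crtc() call makes prepare/commit bracket the entire mode set, replacing the per-register D1GRPH/D1SCL update locks that atombios_crtc_set_base() used to toggle. The helper layer therefore produces a sequence like the following (a sketch of the call order, not code from this diff):

```c
/* Sketch: the drm_crtc_helper layer drives a mode set through
 * prepare/mode_set/commit, so the CRTC's double-buffered registers
 * stay latched for the whole reprogramming sequence. */
atombios_crtc_prepare(crtc);	/* DPMS off, atombios_lock_crtc(crtc, 1) */
atombios_crtc_mode_set(crtc, mode, adjusted_mode, x, y);
atombios_crtc_commit(crtc);	/* DPMS on,  atombios_lock_crtc(crtc, 0) */
```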
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index bfe2ae1a..1b71b45d 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -166,7 +166,6 @@ typedef unsigned long uintptr_t;
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
 #define DRM_MAP_HASH_ORDER 12
-#define DRM_OBJECT_HASH_ORDER 12
 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
 /*
@@ -405,14 +404,6 @@ struct drm_buf_entry {
 	struct drm_freelist freelist;
 };
 
-
-enum drm_ref_type {
-	_DRM_REF_USE = 0,
-	_DRM_REF_TYPE1,
-	_DRM_NO_REF_TYPES
-};
-
-
 /** File private data */
 struct drm_file {
 	int authenticated;
@@ -424,21 +415,11 @@ struct drm_file {
 	struct drm_minor *minor;
 	unsigned long lock_count;
 
-	/*
-	 * The user object hash table is global and resides in the
-	 * drm_device structure. We protect the lists and hash tables with the
-	 * device struct_mutex. A bit coarse-grained but probably the best
-	 * option.
-	 */
-
-	struct list_head refd_objects;
-
 	/** Mapping of mm object handles to object pointers. */
 	struct idr object_idr;
 	/** Lock for synchronization of access to object_idr. */
 	spinlock_t table_lock;
 
-	struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
 	struct file *filp;
 	void *driver_priv;
@@ -684,7 +665,9 @@ struct drm_gem_object {
 
 /* per-master structure */
 struct drm_master {
-
+	struct kref refcount;		/* refcount for this master */
+
 	struct list_head head;		/**< each minor contains a list of masters */
 	struct drm_minor *minor;	/**< link back to minor we are a master for */
@@ -901,7 +884,6 @@ struct drm_device {
 	int map_count;			/**< Number of mappable regions */
 	struct drm_open_hash map_hash;	/**< User token hash table for maps */
 	struct drm_mm offset_manager;	/**< User token manager */
-	struct drm_open_hash object_hash; /**< User token hash table for objects */
 	struct address_space *dev_mapping; /**< For unmap_mapping_range() */
 	struct page *ttm_dummy_page;
@@ -1366,12 +1348,13 @@ extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv);
 extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
-extern struct drm_master *drm_get_master(struct drm_minor *minor);
-extern void drm_put_master(struct drm_master *master);
+struct drm_master *drm_master_create(struct drm_minor *minor);
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
-		       struct drm_driver *driver);
+		       struct drm_driver *driver);
 extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p);
+extern int drm_put_minor(struct drm_minor **minor_p);
 extern unsigned int drm_debug; /* 1 to enable debug output */
 extern struct class *drm_class;
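drm_master_get()/drm_master_put() replace the old get/put pair with kref-based reference counting on the new drm_master::refcount field. Their bodies are not part of this excerpt; a plausible shape, assuming a drm_master_destroy() release callback:

```c
struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);		/* one more holder */
	return master;
}

void drm_master_put(struct drm_master **master)
{
	/* drm_master_destroy() (assumed name) frees once the count hits 0 */
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;				/* poison the caller's pointer */
}
```

Taking a pointer-to-pointer in the put path is the same convention the buffer-object code below uses (drm_bo_usage_deref_*): the caller's reference is consumed and its pointer cleared in one step.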
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index ace6734a..09b3fa39 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -565,18 +565,6 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
-static void drm_bo_base_deref_locked(struct drm_file *file_priv,
-				     struct drm_user_object *uo)
-{
-	struct drm_buffer_object *bo =
-	    drm_user_object_entry(uo, struct drm_buffer_object, base);
-
-	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-
-	drm_bo_takedown_vm_locked(bo);
-	drm_bo_usage_deref_locked(&bo);
-}
-
 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
 {
 	struct drm_buffer_object *tmp_bo = *bo;
@@ -1068,40 +1056,12 @@ static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
 }
 
 /*
- * Call dev->struct_mutex locked.
- */
-
-struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-						   uint32_t handle, int check_owner)
-{
-	struct drm_user_object *uo;
-	struct drm_buffer_object *bo;
-
-	uo = drm_lookup_user_object(file_priv, handle);
-
-	if (!uo || (uo->type != drm_buffer_type)) {
-		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
-		return NULL;
-	}
-
-	if (check_owner && file_priv != uo->owner) {
-		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
-			return NULL;
-	}
-
-	bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-	atomic_inc(&bo->usage);
-	return bo;
-}
-EXPORT_SYMBOL(drm_lookup_buffer_object);
-
-/*
 * Call bo->mutex locked.
 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */
-static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
+int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
 {
 	struct drm_fence_object *fence = bo->fence;
@@ -1158,149 +1118,6 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
 }
 
 /*
- * Fill in the ioctl reply argument with buffer info.
- * Bo locked.
- */
-
-void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-			 struct drm_bo_info_rep *rep)
-{
-	if (!rep)
-		return;
-
-	rep->handle = bo->base.hash.key;
-	rep->flags = bo->mem.flags;
-	rep->size = bo->num_pages * PAGE_SIZE;
-	rep->offset = bo->offset;
-
-	/*
-	 * drm_bo_type_device buffers have user-visible
-	 * handles which can be used to share across
-	 * processes. Hand that back to the application
-	 */
-	if (bo->type == drm_bo_type_device)
-		rep->arg_handle = bo->map_list.user_token;
-	else
-		rep->arg_handle = 0;
-
-	rep->proposed_flags = bo->mem.proposed_flags;
-	rep->buffer_start = bo->buffer_start;
-	rep->fence_flags = bo->fence_type;
-	rep->rep_flags = 0;
-	rep->page_alignment = bo->mem.page_alignment;
-
-	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
-		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
-				DRM_BO_REP_BUSY);
-	}
-}
-EXPORT_SYMBOL(drm_bo_fill_rep_arg);
-
-/*
- * Wait for buffer idle and register that we've mapped the buffer.
- * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
- * so that if the client dies, the mapping is automatically
- * unregistered.
- */
-
-static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
-				 uint32_t map_flags, unsigned hint,
-				 struct drm_bo_info_rep *rep)
-{
-	struct drm_buffer_object *bo;
-	struct drm_device *dev = file_priv->minor->dev;
-	int ret = 0;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	mutex_lock(&bo->mutex);
-	do {
-		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
-
-		ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
-		if (unlikely(ret))
-			goto out;
-
-		if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-			drm_bo_evict_cached(bo);
-
-	} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
-
-	atomic_inc(&bo->mapped);
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		if (atomic_dec_and_test(&bo->mapped))
-			wake_up_all(&bo->event_queue);
-
-	} else
-		drm_bo_fill_rep_arg(bo, rep);
-
- out:
-	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(&bo);
-
-	return ret;
-}
-
-static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-	struct drm_ref_object *ro;
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	if (!bo) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-	if (!ro) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	drm_remove_ref_object(file_priv, ro);
-	drm_bo_usage_deref_locked(&bo);
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/*
- * Call struct-sem locked.
- */
-
-static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-					 struct drm_user_object *uo,
-					 enum drm_ref_type action)
-{
-	struct drm_buffer_object *bo =
-	    drm_user_object_entry(uo, struct drm_buffer_object, base);
-
-	/*
-	 * We DON'T want to take the bo->lock here, because we want to
-	 * hold it when we wait for unmapped buffer.
-	 */
-
-	BUG_ON(action != _DRM_REF_TYPE1);
-
-	if (atomic_dec_and_test(&bo->mapped))
-		wake_up_all(&bo->event_queue);
-}
-
-/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
 */
@@ -1594,8 +1411,7 @@ static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
 int drm_bo_do_validate(struct drm_buffer_object *bo,
		       uint64_t flags, uint64_t mask, uint32_t hint,
-		       uint32_t fence_class,
-		       struct drm_bo_info_rep *rep)
+		       uint32_t fence_class)
 {
 	int ret;
 	int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
@@ -1622,132 +1438,12 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
 	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
 out:
-	if (rep)
-		drm_bo_fill_rep_arg(bo, rep);
-
 	mutex_unlock(&bo->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_bo_do_validate);
 
-/**
- * drm_bo_handle_validate
- *
- * @file_priv: the drm file private, used to get a handle to the user context
- *
- * @handle: the buffer object handle
- *
- * @flags: access rights, mapping parameters and cacheability. See
- * the DRM_BO_FLAG_* values in drm.h
- *
- * @mask: Which flag values to change; this allows callers to modify
- * things without knowing the current state of other flags.
- *
- * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
- * values in drm.h.
- *
- * @fence_class: a driver-specific way of doing fences. Presumably,
- * this would be used if the driver had more than one submission and
- * fencing mechanism. At this point, there isn't any use of this
- * from the user mode code.
- *
- * @rep: To be stuffed with the reply from validation
- *
- * @bp_rep: To be stuffed with the buffer object pointer
- *
- * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
- * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
- * This is a convenience wrapper only.
- */
-
-int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-			   uint64_t flags, uint64_t mask,
-			   uint32_t hint,
-			   uint32_t fence_class,
-			   struct drm_bo_info_rep *rep,
-			   struct drm_buffer_object **bo_rep)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	if (bo->base.owner != file_priv)
-		mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
-	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
-
-	if (!ret && bo_rep)
-		*bo_rep = bo;
-	else
-		drm_bo_usage_deref_unlocked(&bo);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_bo_handle_validate);
-
-
-static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
-			      struct drm_bo_info_rep *rep)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	mutex_lock(&bo->mutex);
-
-	/*
-	 * FIXME: Quick busy here?
-	 */
-
-	drm_bo_busy(bo, 1);
-	drm_bo_fill_rep_arg(bo, rep);
-	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(&bo);
-	return 0;
-}
-
-static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
-			      uint32_t hint,
-			      struct drm_bo_info_rep *rep)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_buffer_object *bo;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	mutex_lock(&bo->mutex);
-	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
-	if (ret)
-		goto out;
-
-	drm_bo_fill_rep_arg(bo, rep);
-out:
-	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(&bo);
-	return ret;
-}
-
 int drm_buffer_object_create(struct drm_device *dev,
			     unsigned long size,
			     enum drm_bo_type type,
@@ -1822,7 +1518,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 	mutex_unlock(&bo->mutex);
 
 	ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
-				 0, NULL);
+				 0);
 	if (ret)
 		goto out_err_unlocked;
@@ -1837,230 +1533,6 @@ out_err_unlocked:
 }
 EXPORT_SYMBOL(drm_buffer_object_create);
 
-
-int drm_bo_add_user_object(struct drm_file *file_priv,
-			   struct drm_buffer_object *bo, int shareable)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(file_priv, &bo->base, shareable);
-	if (ret)
-		goto out;
-
-	bo->base.remove = drm_bo_base_deref_locked;
-	bo->base.type = drm_buffer_type;
-	bo->base.ref_struct_locked = NULL;
-	bo->base.unref = drm_buffer_user_object_unmap;
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_bo_add_user_object);
-
-int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_create_arg *arg = data;
-	struct drm_bo_create_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	struct drm_buffer_object *entry;
-	enum drm_bo_type bo_type;
-	int ret = 0;
-
-	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
-		  (int)(req->size / 1024), req->page_alignment * 4);
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * If the buffer creation request comes in with a starting address,
-	 * that points at the desired user pages to map. Otherwise, create
-	 * a drm_bo_type_device buffer, which uses pages allocated from the kernel
-	 */
-	bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
-
-	/*
-	 * User buffers cannot be shared
-	 */
-	if (bo_type == drm_bo_type_user)
-		req->flags &= ~DRM_BO_FLAG_SHAREABLE;
-
-	ret = drm_buffer_object_create(file_priv->minor->dev,
-				       req->size, bo_type, req->flags,
-				       req->hint, req->page_alignment,
-				       req->buffer_start, &entry);
-	if (ret)
-		goto out;
-
-	ret = drm_bo_add_user_object(file_priv, entry,
-				     req->flags & DRM_BO_FLAG_SHAREABLE);
-	if (ret) {
-		drm_bo_usage_deref_unlocked(&entry);
-		goto out;
-	}
-
-	mutex_lock(&entry->mutex);
-	drm_bo_fill_rep_arg(entry, rep);
-	mutex_unlock(&entry->mutex);
-
-out:
-	return ret;
-}
-
-int drm_bo_setstatus_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_map_wait_idle_arg *arg = data;
-	struct drm_bo_info_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	struct drm_buffer_object *bo;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
-	if (ret)
-		return ret;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!bo)
-		return -EINVAL;
-
-	if (bo->base.owner != file_priv)
-		req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
-	ret = drm_bo_do_validate(bo, req->flags, req->mask,
-				 req->hint | DRM_BO_HINT_DONT_FENCE,
-				 bo->fence_class, rep);
-
-	drm_bo_usage_deref_unlocked(&bo);
-
-	(void) drm_bo_read_unlock(&dev->bm.bm_lock);
-
-	return ret;
-}
-
-int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_map_wait_idle_arg *arg = data;
-	struct drm_bo_info_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
-				    req->hint, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_buffer_object_unmap(file_priv, arg->handle);
-	return ret;
-}
-
-
-int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_reference_info_arg *arg = data;
-	struct drm_bo_handle_arg *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	struct drm_user_object *uo;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_user_object_ref(file_priv, req->handle,
-				  drm_buffer_type, &uo);
-	if (ret)
-		return ret;
-
-	ret = drm_bo_handle_info(file_priv, req->handle, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	int ret = 0;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
-	return ret;
-}
-
-int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_reference_info_arg *arg = data;
-	struct drm_bo_handle_arg *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_handle_info(file_priv, req->handle, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_map_wait_idle_arg *arg = data;
-	struct drm_bo_info_req *req = &arg->d.req;
-	struct drm_bo_info_rep *rep = &arg->d.rep;
-	int ret;
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_handle_wait(file_priv, req->handle,
-				 req->hint, rep);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static int drm_bo_leave_list(struct drm_buffer_object *bo,
			     uint32_t mem_type,
			     int free_pinned,
@@ -2240,7 +1712,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm);
  *point since we have the hardware lock.
  */
 
-static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
+int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
 {
 	int ret;
 	struct drm_buffer_manager *bm = &dev->bm;
@@ -2389,7 +1861,6 @@ int drm_bo_driver_init(struct drm_device *dev)
 	int ret = -EINVAL;
 
 	bm->dummy_read_page = NULL;
-	drm_bo_init_lock(&bm->bm_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	if (!driver)
		goto out_unlock;
@@ -2435,191 +1906,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_bo_driver_init);
 
-int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_init_arg *arg = data;
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
-	if (ret)
-		return ret;
-
-	ret = -EINVAL;
-	if (arg->magic != DRM_BO_INIT_MAGIC) {
-		DRM_ERROR("You are using an old libdrm that is not compatible with\n"
-			  "\tthe kernel DRM module. Please upgrade your libdrm.\n");
-		return -EINVAL;
-	}
-	if (arg->major != DRM_BO_INIT_MAJOR) {
-		DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-			  "\tversion don't match. Got %d, expected %d.\n",
-			  arg->major, DRM_BO_INIT_MAJOR);
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	if (!bm->initialized) {
-		DRM_ERROR("DRM memory manager was not initialized.\n");
-		goto out;
-	}
-	if (arg->mem_type == 0) {
-		DRM_ERROR("System memory buffers already initialized.\n");
-		goto out;
-	}
-	ret = drm_bo_init_mm(dev, arg->mem_type,
-			     arg->p_offset, arg->p_size, 0);
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-	(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_type_arg *arg = data;
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
-	if (ret)
-		return ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = -EINVAL;
-	if (!bm->initialized) {
-		DRM_ERROR("DRM memory manager was not initialized\n");
-		goto out;
-	}
-	if (arg->mem_type == 0) {
-		DRM_ERROR("No takedown for System memory buffers.\n");
-		goto out;
-	}
-	ret = 0;
-	if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
-		if (ret == -EINVAL)
-			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg->mem_type);
-		ret = 0;
-	}
-out:
-	mutex_unlock(&dev->struct_mutex);
-	(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_type_arg *arg = data;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
-		DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
-		return -EINVAL;
-	}
-
-	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-		ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
-		if (ret)
-			return ret;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_bo_lock_mm(dev, arg->mem_type);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		(void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-		return ret;
-	}
-
-	return 0;
-}
-
-int drm_mm_unlock_ioctl(struct drm_device *dev,
-			void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_mm_type_arg *arg = data;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	int ret;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-		ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_mm_info_arg *arg = data;
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_bo_driver *driver = dev->driver->bo_driver;
-	struct drm_mem_type_manager *man;
-	int ret = 0;
-	int mem_type = arg->mem_type;
-
-	if (!driver) {
-		DRM_ERROR("Buffer objects are not supported by this driver\n");
-		return -EINVAL;
-	}
-
-	if (mem_type >= DRM_BO_MEM_TYPES) {
-		DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	if (!bm->initialized) {
-		DRM_ERROR("DRM memory manager was not initialized\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	man = &bm->man[arg->mem_type];
-
-	arg->p_size = man->size;
-
-out:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 /*
  * buffer object vm functions.
  */
@@ -2792,15 +2078,3 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
 
 	return 0;
 }
-
-int drm_bo_version_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-
-	arg->major = DRM_BO_INIT_MAJOR;
-	arg->minor = DRM_BO_INIT_MINOR;
-	arg->patchlevel = DRM_BO_INIT_PATCH;
-
-	return 0;
-}
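All of the removed drm_bo handle plumbing relied on the global object_hash; its replacement is the per-file object_idr that the retained drm_file fields describe. A GEM-style lookup under the new scheme looks roughly like this (a sketch modeled on drm_gem_object_lookup, not code from this diff):

```c
struct drm_gem_object *obj;

spin_lock(&file_priv->table_lock);
obj = idr_find(&file_priv->object_idr, handle);	/* handle -> object */
if (obj)
	drm_gem_object_reference(obj);		/* hold it past the lock */
spin_unlock(&file_priv->table_lock);
```

Because handles are per-file, the owner/shareable checks the old drm_lookup_buffer_object() performed become unnecessary: a file can only ever see handles it created or was explicitly given via flink/open.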
diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c
deleted file mode 100644
index 08b1c6be..00000000
--- a/linux-core/drm_bo_lock.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-/*
- * This file implements a simple replacement for the buffer manager use
- * of the heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode is fast, and
- * intended for in-kernel use only.
- * Taking it in write mode is slow.
- *
- * The write mode is used only when there is a need to block all
- * user-space processes from allocating a new memory area.
- * Typical use in write mode is X server VT switching, and it's allowed
- * to leave kernel space with the write lock held. If a user-space process
- * dies while having the write-lock, it will be released during the file
- * descriptor release.
- *
- * The read lock is typically placed at the start of an IOCTL- or
- * user-space callable function that may end up allocating a memory area.
- * This includes setstatus, super-ioctls and no_pfn; the latter may move
- * unmappable regions to mappable. It's a bug to leave kernel space with the
- * read lock held.
- *
- * Both read- and write lock taking may be interruptible for low signal-delivery
- * latency. The locking functions will return -EAGAIN if interrupted by a
- * signal.
- *
- * Locking order: The lock should be taken BEFORE any kernel mutexes
- * or spinlocks.
- */
-
-#include "drmP.h"
-
-void drm_bo_init_lock(struct drm_bo_lock *lock)
-{
-	DRM_INIT_WAITQUEUE(&lock->queue);
-	atomic_set(&lock->write_lock_pending, 0);
-	atomic_set(&lock->readers, 0);
-}
-
-void drm_bo_read_unlock(struct drm_bo_lock *lock)
-{
-	if (atomic_dec_and_test(&lock->readers))
-		wake_up_all(&lock->queue);
-}
-EXPORT_SYMBOL(drm_bo_read_unlock);
-
-int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
-{
-	while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
-		int ret;
-
-		if (!interruptible) {
-			wait_event(lock->queue,
-				   atomic_read(&lock->write_lock_pending) == 0);
-			continue;
-		}
-		ret = wait_event_interruptible
-		    (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
-		if (ret)
-			return -EAGAIN;
-	}
-
-	while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
-		int ret;
-		if (!interruptible) {
-			wait_event(lock->queue,
-				   atomic_read(&lock->readers) != -1);
-			continue;
-		}
-		ret = wait_event_interruptible
-		    (lock->queue, atomic_read(&lock->readers) != -1);
-		if (ret)
-			return -EAGAIN;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(drm_bo_read_lock);
-
-static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
-{
-	if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
-		return -EINVAL;
-	wake_up_all(&lock->queue);
-	return 0;
-}
-
-static void drm_bo_write_lock_remove(struct drm_file *file_priv,
-				     struct drm_user_object *item)
-{
-	struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
-	int ret;
-
-	ret = __drm_bo_write_unlock(lock);
-	BUG_ON(ret);
-}
-
-int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
-		      struct drm_file *file_priv)
-{
-	int ret = 0;
-	struct drm_device *dev;
-
-	atomic_inc(&lock->write_lock_pending);
-
-	while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
-		if (!interruptible) {
-			wait_event(lock->queue,
-				   atomic_read(&lock->readers) == 0);
-			continue;
-		}
-		ret = wait_event_interruptible
-		    (lock->queue, atomic_read(&lock->readers) == 0);
-
-		if (ret) {
-			atomic_dec(&lock->write_lock_pending);
-			wake_up_all(&lock->queue);
-			return -EAGAIN;
-		}
-	}
-
-	/*
-	 * Add a dummy user-object, the destructor of which will
-	 * make sure the lock is released if the client dies
-	 * while holding it.
-	 */
-
-	if (atomic_dec_and_test(&lock->write_lock_pending))
-		wake_up_all(&lock->queue);
-	dev = file_priv->minor->dev;
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(file_priv, &lock->base, 0);
-	lock->base.remove = &drm_bo_write_lock_remove;
-	lock->base.type = drm_lock_type;
-	if (ret)
-		(void)__drm_bo_write_unlock(lock);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_ref_object *ro;
-
-	mutex_lock(&dev->struct_mutex);
-
-	if (lock->base.owner != file_priv) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
-	BUG_ON(!ro);
-	drm_remove_ref_object(file_priv, ro);
-	lock->base.owner = NULL;
-
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
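For reference, the read-side fast path of the lock deleted above: readers use atomic_add_unless() so that a pending or active writer (readers == -1) blocks them, exactly as the file's own comment describes.

```c
/* Condensed from the removed drm_bo_read_lock(), uninterruptible case:
 * atomic_add_unless() fails only while a writer holds readers at -1. */
while (!atomic_add_unless(&lock->readers, 1, -1))
	wait_event(lock->queue, atomic_read(&lock->readers) != -1);
```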
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index e9052570..c966badc 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -1528,6 +1528,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	dev->buf_use++;		/* Can't allocate more after this call */
 	spin_unlock(&dev->count_lock);
 
+	DRM_DEBUG("dma buf count %d, req count %d\n", request->count, dma->buf_count);
 	if (request->count >= dma->buf_count) {
 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
@@ -1538,10 +1539,12 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 			unsigned long token = dev->agp_buffer_token;
 
 			if (!map) {
+				DRM_DEBUG("No map\n");
 				retcode = -EINVAL;
 				goto done;
 			}
 			down_write(&current->mm->mmap_sem);
+			DRM_DEBUG("%x %d\n", token, map->size);
 			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
@@ -1555,6 +1558,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 			up_write(&current->mm->mmap_sem);
 		}
 		if (virtual > -1024UL) {
+			DRM_DEBUG("mmap failed\n");
 			/* Real error */
 			retcode = (signed long)virtual;
 			goto done;
diff --git a/linux-core/drm_crtc_helper.c b/linux-core/drm_crtc_helper.c
index e0d93606..dc18dbb4 100644
--- a/linux-core/drm_crtc_helper.c
+++ b/linux-core/drm_crtc_helper.c
@@ -203,7 +203,7 @@ EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 * LOCKING:
 * Caller must hold mode config lock.
 */
-static void drm_pick_crtcs (struct drm_device *dev)
+void drm_pick_crtcs (struct drm_device *dev)
 {
 	int c, o, assigned;
 	struct drm_connector *connector, *connector_equal;
@@ -715,48 +715,4 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
 
-/**
- * drm_get_buffer_object - find the buffer object for a given handle
- * @dev: DRM device
- * @bo: pointer to caller's buffer_object pointer
- * @handle: handle to lookup
- *
- * LOCKING:
- * Must take @dev's struct_mutex to protect buffer object lookup.
- *
- * Given @handle, lookup the buffer object in @dev and put it in the caller's
- * @bo pointer.
- *
- * RETURNS:
- * Zero on success, -EINVAL if the handle couldn't be found.
- */
-int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
-{
-	struct drm_user_object *uo;
-	struct drm_hash_item *hash;
-	int ret;
-
-	*bo = NULL;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
-	if (ret) {
-		DRM_ERROR("Couldn't find handle.\n");
-		ret = -EINVAL;
-		goto out_err;
-	}
-
-	uo = drm_hash_entry(hash, struct drm_user_object, hash);
-	if (uo->type != drm_buffer_type) {
-		ret = -EINVAL;
-		goto out_err;
-	}
-
-	*bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-	ret = 0;
-out_err:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_get_buffer_object);
diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c
index f7bff0ac..1e6aeefd 100644
--- a/linux-core/drm_dma.c
+++ b/linux-core/drm_dma.c
@@ -58,6 +58,7 @@ int drm_dma_setup(struct drm_device *dev)
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_dma_setup);
 
 /**
 * Cleanup the DMA resources.
@@ -120,6 +121,7 @@ void drm_dma_takedown(struct drm_device *dev)
 	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
 	dev->dma = NULL;
 }
+EXPORT_SYMBOL(drm_dma_takedown);
 
 /**
 * Free a buffer.
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index aefbd76d..11044bff 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -146,36 +146,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
-		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
-
 #if OS_HAS_GEM
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
@@ -202,8 +172,6 @@ int drm_lastclose(struct drm_device * dev)
 
 	DRM_DEBUG("\n");
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_bo_driver_finish(dev);
 
 	/*
	 * We can't do much about this function failing.
	 */
@@ -213,9 +181,11 @@ int drm_lastclose(struct drm_device * dev)
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");
 
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_bo_driver_finish(dev);
 
-/*	if (dev->irq_enabled)
-		drm_irq_uninstall(dev); */
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);
 
 	/* Free drawable information memory */
 	mutex_lock(&dev->struct_mutex);
@@ -223,13 +193,8 @@ int drm_lastclose(struct drm_device * dev)
 	drm_drawable_free_all(dev);
 	del_timer(&dev->timer);
 
-	if (dev->primary->master) {
-		drm_put_master(dev->primary->master);
-		dev->primary->master = NULL;
-	}
-
 	/* Clear AGP information */
-	if (drm_core_has_AGP(dev) && dev->agp) {
+	if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_agp_mem *entry, *tempe;
 
 		/* Remove AGP resources, but leave dev->agp
@@ -287,7 +252,7 @@ int drm_lastclose(struct drm_device * dev)
 	}
 	dev->queue_count = 0;
 
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_dma_takedown(dev);
 
 	dev->dev_mapping = NULL;
@@ -431,11 +396,11 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ctxbitmap_cleanup(dev);
 	drm_ht_remove(&dev->map_hash);
 	drm_mm_takedown(&dev->offset_manager);
-	drm_ht_remove(&dev->object_hash);
 
-	drm_put_minor(dev, &dev->primary);
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(dev, &dev->control);
+		drm_put_minor(&dev->control);
+
+	drm_put_minor(&dev->primary);
 
 	if (drm_put_dev(dev))
		DRM_ERROR("Cannot unload module\n");
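The drm_drv.c hunks apply one recurring pattern: legacy teardown paths now run only for drivers that are not modesetting-aware, since under kernel modesetting these resources outlive the last open file. The guard, taken directly from the lastclose() changes above:

```c
/* Legacy DMA teardown only runs for non-modesetting (UMS) drivers;
 * a KMS driver keeps its state across lastclose(). */
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
    !drm_core_check_feature(dev, DRIVER_MODESET))
	drm_dma_takedown(dev);
```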
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 7c78e09f..7130d1b0 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -134,8 +134,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
 		if (new_type) {
 			fence->signaled_types |= new_type;
-			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
-				  fence->base.hash.key, fence->signaled_types);
+			DRM_DEBUG("Fence %p signaled 0x%08x\n",
+				  fence, fence->signaled_types);
 
 			if (driver->needed_flush)
				fc->pending_flush |= driver->needed_flush(fence);
@@ -147,8 +147,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
 		fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
 
 		if (!(fence->type & ~fence->signaled_types)) {
-			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
-				  fence->base.hash.key);
+			DRM_DEBUG("Fence completely signaled %p\n",
+				  fence);
 			list_del_init(&fence->ring);
 		}
 	}
@@ -196,10 +196,9 @@ void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
 	*fence = NULL;
 	if (atomic_dec_and_test(&tmp_fence->usage)) {
 		drm_fence_unring(dev, &tmp_fence->ring);
-		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
-			  tmp_fence->base.hash.key);
+		DRM_DEBUG("Destroyed a fence object %p\n",
+			  tmp_fence);
 		atomic_dec(&fm->count);
-		BUG_ON(!list_empty(&tmp_fence->base.list));
 		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 }
@@ -217,7 +216,6 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
 		if (atomic_read(&tmp_fence->usage) == 0) {
 			drm_fence_unring(dev, &tmp_fence->ring);
 			atomic_dec(&fm->count);
-			BUG_ON(!list_empty(&tmp_fence->base.list));
 			drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 		}
 		mutex_unlock(&dev->struct_mutex);
@@ -244,15 +242,6 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 }
 EXPORT_SYMBOL(drm_fence_reference_unlocked);
 
-static void drm_fence_object_destroy(struct drm_file *priv,
-				     struct drm_user_object *base)
-{
-	struct drm_fence_object *fence =
-	    drm_user_object_entry(base, struct drm_fence_object, base);
-
-	drm_fence_usage_deref_locked(&fence);
-}
-
 int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
 {
 	unsigned long flags;
@@ -477,7 +466,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
 	 * Avoid hitting BUG() for kernel-only fence objects.
	 */
 
-	INIT_LIST_HEAD(&fence->base.list);
 	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->signaled_types = 0;
@@ -493,26 +481,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
 	return ret;
 }
 
-int drm_fence_add_user_object(struct drm_file *priv,
-			      struct drm_fence_object *fence, int shareable)
-{
-	struct drm_device *dev = priv->minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_add_user_object(priv, &fence->base, shareable);
-	if (ret)
-		goto out;
-	atomic_inc(&fence->usage);
-	fence->base.type = drm_fence_type;
-	fence->base.remove = &drm_fence_object_destroy;
-	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
-out:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_fence_add_user_object);
-
 int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
			    uint32_t type, unsigned flags,
			    struct drm_fence_object **c_fence)
@@ -569,261 +537,7 @@ void drm_fence_manager_init(struct drm_device *dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
 
-void drm_fence_fill_arg(struct drm_fence_object *fence,
-			struct drm_fence_arg *arg)
-{
-	struct drm_device *dev = fence->dev;
-	struct drm_fence_manager *fm = &dev->fm;
-	unsigned long irq_flags;
-
-	read_lock_irqsave(&fm->lock, irq_flags);
-	arg->handle = fence->base.hash.key;
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled_types;
-	arg->error = fence->error;
-	arg->sequence = fence->sequence;
-	read_unlock_irqrestore(&fm->lock, irq_flags);
-}
-EXPORT_SYMBOL(drm_fence_fill_arg);
-
 void drm_fence_manager_takedown(struct drm_device *dev)
 {
 }
 
-struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
-						 uint32_t handle)
-{
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_user_object *uo;
-	struct drm_fence_object *fence;
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(priv, handle);
-	if (!uo || (uo->type != drm_fence_type)) {
-		mutex_unlock(&dev->struct_mutex);
-		return NULL;
-	}
-	fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
-	mutex_unlock(&dev->struct_mutex);
-	return fence;
-}
-
-int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	if (arg->flags & DRM_FENCE_FLAG_EMIT)
-		LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_object_create(dev, arg->fence_class,
-				      arg->type, arg->flags, &fence);
-	if (ret)
-		return ret;
-	ret = drm_fence_add_user_object(file_priv, fence,
-					arg->flags &
-					DRM_FENCE_FLAG_SHAREABLE);
-	if (ret) {
-		drm_fence_usage_deref_unlocked(&fence);
-		return ret;
-	}
-
-	/*
-	 * usage > 0. No need to lock dev->struct_mutex;
-	 */
-
-	arg->handle = fence->base.hash.key;
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	struct drm_user_object *uo;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
-	if (ret)
-		return ret;
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-
-int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
-}
-
-int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-	ret = drm_fence_object_flush(fence, arg->type);
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-
-int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-	ret = drm_fence_object_wait(fence,
-				    arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
-				    0, arg->type);
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-
-int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	fence = drm_lookup_fence_object(file_priv, arg->handle);
-	if (!fence)
-		return -EINVAL;
-	ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
-				    arg->type);
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
-
-int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_fence_object *fence;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized\n");
-		return -EINVAL;
-	}
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
-				       NULL, &fence);
-	if (ret)
-		return ret;
-
-	if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
-		ret = drm_fence_add_user_object(file_priv, fence,
-						arg->flags &
-						DRM_FENCE_FLAG_SHAREABLE);
-		if (ret)
-			return ret;
-	}
-
-	arg->handle = fence->base.hash.key;
-
-	drm_fence_fill_arg(fence, arg);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
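With the fence ioctls and user-object hooks removed, fence objects are created and released purely inside the kernel, which is why the debug messages above switch from user handles to raw pointers. A sketch of in-kernel usage with the functions this diff keeps:

```c
struct drm_fence_object *fence;
int ret;

ret = drm_fence_object_create(dev, fence_class, type, flags, &fence);
if (ret)
	return ret;
/* ... emit and wait on the fence from driver code ... */
drm_fence_usage_deref_unlocked(&fence);	/* drop the creation reference */
```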
fd, struct file *filp, int on) } EXPORT_SYMBOL(drm_fasync); -static void drm_object_release(struct file *filp) -{ - struct drm_file *priv = filp->private_data; - struct list_head *head; - struct drm_ref_object *ref_object; - int i; - - /* - * Free leftover ref objects created by me. Note that we cannot use - * list_for_each() here, as the struct_mutex may be temporarily - * released by the remove_() functions, and thus the lists may be - * altered. - * Also, a drm_remove_ref_object() will not remove it - * from the list unless its refcount is 1. - */ - - head = &priv->refd_objects; - while (head->next != head) { - ref_object = list_entry(head->next, struct drm_ref_object, list); - drm_remove_ref_object(priv, ref_object); - head = &priv->refd_objects; - } - - for (i = 0; i < _DRM_NO_REF_TYPES; ++i) - drm_ht_remove(&priv->refd_object_hash[i]); -} - /** * Release file. * @@ -495,6 +460,8 @@ int drm_release(struct inode *inode, struct file *filp) if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_fb_release(filp); + mutex_lock(&dev->struct_mutex); + if (file_priv->is_master) { struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) { @@ -503,19 +470,17 @@ int drm_release(struct inode *inode, struct file *filp) temp->authenticated = 0; } - if (file_priv->minor->master == file_priv->master) - file_priv->minor->master = NULL; - drm_put_master(file_priv->master); + if (file_priv->minor->master == file_priv->master) { + /* drop the reference held by the minor */ + drm_master_put(&file_priv->minor->master); + } } - file_priv->master = NULL; + /* drop the reference held by the file priv */ + drm_master_put(&file_priv->master); file_priv->is_master = 0; - mutex_lock(&dev->struct_mutex); - drm_object_release(filp); - list_del(&file_priv->lhead); - mutex_unlock(&dev->struct_mutex); if (dev->driver->postclose) diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index b90fc020..fe648933 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -188,6 +188,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) } return pt; } +EXPORT_SYMBOL(drm_realloc); /** * Allocate pages. diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c deleted file mode 100644 index 2994b716..00000000 --- a/linux-core/drm_object.c +++ /dev/null @@ -1,294 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> - */ - -#include "drmP.h" - -int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, - int shareable) -{ - struct drm_device *dev = priv->minor->dev; - int ret; - - DRM_ASSERT_LOCKED(&dev->struct_mutex); - - /* The refcount will be bumped to 1 when we add the ref object below. */ - atomic_set(&item->refcount, 0); - item->shareable = shareable; - item->owner = priv; - - ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash, - (unsigned long)item, 31, 0, 0); - if (ret) - return ret; - - ret = drm_add_ref_object(priv, item, _DRM_REF_USE); - if (ret) - ret = drm_ht_remove_item(&dev->object_hash, &item->hash); - - return ret; -} -EXPORT_SYMBOL(drm_add_user_object); - -struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key) -{ - struct drm_device *dev = priv->minor->dev; - struct drm_hash_item *hash; - int ret; - struct drm_user_object *item; - - DRM_ASSERT_LOCKED(&dev->struct_mutex); - - ret = drm_ht_find_item(&dev->object_hash, key, &hash); - if (ret) - return NULL; - - item = drm_hash_entry(hash, struct drm_user_object, hash); - - if (priv != item->owner) { - struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; - ret = drm_ht_find_item(ht, (unsigned long)item, &hash); - if (ret) { - DRM_ERROR("Object not registered for usage\n"); - return NULL; - } - } - return item; -} -EXPORT_SYMBOL(drm_lookup_user_object); - -static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item) -{ - struct drm_device *dev = priv->minor->dev; - int ret; - - if (atomic_dec_and_test(&item->refcount)) { - ret = drm_ht_remove_item(&dev->object_hash, &item->hash); - BUG_ON(ret); - item->remove(priv, item); - } -} - -static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro, - enum drm_ref_type action) -{ - int ret = 0; - - switch (action) { - case _DRM_REF_USE: - atomic_inc(&ro->refcount); - break; - default: - if (!ro->ref_struct_locked) { - break; - } else { - ro->ref_struct_locked(priv, ro, action); - } - } - return ret; -} - -int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object, - enum drm_ref_type ref_action) -{ - int ret = 0; - struct drm_ref_object *item; - struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; - - DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); - if (!referenced_object->shareable && priv != referenced_object->owner) { - DRM_ERROR("Not allowed to reference this object\n"); - return -EINVAL; - } - - /* - * If this is not a usage reference, Check that usage has been registered - * first. Otherwise strange things may happen on destruction. 
- */ - - if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) { - item = - drm_lookup_ref_object(priv, referenced_object, - _DRM_REF_USE); - if (!item) { - DRM_ERROR - ("Object not registered for usage by this client\n"); - return -EINVAL; - } - } - - if (NULL != - (item = - drm_lookup_ref_object(priv, referenced_object, ref_action))) { - atomic_inc(&item->refcount); - return drm_object_ref_action(priv, referenced_object, - ref_action); - } - - item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS); - if (item == NULL) { - DRM_ERROR("Could not allocate reference object\n"); - return -ENOMEM; - } - - atomic_set(&item->refcount, 1); - item->hash.key = (unsigned long)referenced_object; - ret = drm_ht_insert_item(ht, &item->hash); - item->unref_action = ref_action; - - if (ret) - goto out; - - list_add(&item->list, &priv->refd_objects); - ret = drm_object_ref_action(priv, referenced_object, ref_action); -out: - return ret; -} - -struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, - struct drm_user_object *referenced_object, - enum drm_ref_type ref_action) -{ - struct drm_hash_item *hash; - int ret; - - DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); - ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], - (unsigned long)referenced_object, &hash); - if (ret) - return NULL; - - return drm_hash_entry(hash, struct drm_ref_object, hash); -} -EXPORT_SYMBOL(drm_lookup_ref_object); - -static void drm_remove_other_references(struct drm_file *priv, - struct drm_user_object *ro) -{ - int i; - struct drm_open_hash *ht; - struct drm_hash_item *hash; - struct drm_ref_object *item; - - for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { - ht = &priv->refd_object_hash[i]; - while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { - item = drm_hash_entry(hash, struct drm_ref_object, hash); - drm_remove_ref_object(priv, item); - } - } -} - -void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item) -{ - int ret; - struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; - struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; - enum drm_ref_type unref_action; - - DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); - unref_action = item->unref_action; - if (atomic_dec_and_test(&item->refcount)) { - ret = drm_ht_remove_item(ht, &item->hash); - BUG_ON(ret); - list_del_init(&item->list); - if (unref_action == _DRM_REF_USE) - drm_remove_other_references(priv, user_object); - drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS); - } - - switch (unref_action) { - case _DRM_REF_USE: - drm_deref_user_object(priv, user_object); - break; - default: - BUG_ON(!user_object->unref); - user_object->unref(priv, user_object, unref_action); - break; - } - -} -EXPORT_SYMBOL(drm_remove_ref_object); - -int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, - enum drm_object_type type, struct drm_user_object **object) -{ - struct drm_device *dev = priv->minor->dev; - struct drm_user_object *uo; - struct drm_hash_item *hash; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm_ht_find_item(&dev->object_hash, user_token, &hash); - if (ret) { - DRM_ERROR("Could not find user object to reference.\n"); - goto out_err; - } - uo = drm_hash_entry(hash, struct drm_user_object, hash); - if (uo->type != type) { - ret = -EINVAL; - goto out_err; - } - ret = drm_add_ref_object(priv, uo, _DRM_REF_USE); - if (ret) - goto out_err; - mutex_unlock(&dev->struct_mutex); - *object = uo; - return 0; -out_err: - 
mutex_unlock(&dev->struct_mutex); - return ret; -} - -int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, - enum drm_object_type type) -{ - struct drm_device *dev = priv->minor->dev; - struct drm_user_object *uo; - struct drm_ref_object *ro; - int ret; - - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, user_token); - if (!uo || (uo->type != type)) { - ret = -EINVAL; - goto out_err; - } - ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE); - if (!ro) { - ret = -EINVAL; - goto out_err; - } - drm_remove_ref_object(priv, ro); - mutex_unlock(&dev->struct_mutex); - return 0; -out_err: - mutex_unlock(&dev->struct_mutex); - return ret; -} diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index d8e8ee32..925b4d67 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -34,110 +34,201 @@ struct drm_device; struct drm_bo_mem_reg; -/*************************************************** - * User space objects. (drm_object.c) - */ - -#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -enum drm_object_type { - drm_fence_type, - drm_buffer_type, - drm_lock_type, - /* - * Add other user space object types here. - */ - drm_driver_type0 = 256, - drm_driver_type1, - drm_driver_type2, - drm_driver_type3, - drm_driver_type4 +#define DRM_FENCE_FLAG_EMIT 0x00000001 +#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 +/** + * On hardware with no interrupt events for operation completion, + * indicates that the kernel should sleep while waiting for any blocking + * operation to complete rather than spinning. + * + * Has no effect otherwise. + */ +#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 +#define DRM_FENCE_FLAG_NO_USER 0x00000010 + +/* Reserved for driver use */ +#define DRM_FENCE_MASK_DRIVER 0xFF000000 + +#define DRM_FENCE_TYPE_EXE 0x00000001 + +struct drm_fence_arg { + unsigned int handle; + unsigned int fence_class; + unsigned int type; + unsigned int flags; + unsigned int signaled; + unsigned int error; + unsigned int sequence; + unsigned int pad64; + uint64_t expand_pad[2]; /*Future expansion */ }; +/* Buffer permissions, referring to how the GPU uses the buffers. * These translate to fence types used for the buffers. * Typically a texture buffer is read, a destination buffer is write, and * a command (batch-) buffer is exe. Can be or-ed together. + */ + +#define DRM_BO_FLAG_READ (1ULL << 0) +#define DRM_BO_FLAG_WRITE (1ULL << 1) +#define DRM_BO_FLAG_EXE (1ULL << 2) + /* - * A user object is a structure that helps the drm give out user handles - * to kernel internal objects and to keep track of these objects so that - * they can be destroyed, for example when the user space process exits. - * Designed to be accessible using a user space 32-bit handle. - */ - -struct drm_user_object { - struct drm_hash_item hash; - struct list_head list; - enum drm_object_type type; - atomic_t refcount; - int shareable; - struct drm_file *owner; - void (*ref_struct_locked) (struct drm_file *priv, - struct drm_user_object *obj, - enum drm_ref_type ref_action); - void (*unref) (struct drm_file *priv, struct drm_user_object *obj, - enum drm_ref_type unref_action); - void (*remove) (struct drm_file *priv, struct drm_user_object *obj); -}; + * All of the bits related to access mode + */ +#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE) +/* + * Status flags. Can be read to determine the actual state of a buffer. + * Can also be set in the buffer mask before validation.
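+ *
+ * Illustrative sketch (not part of this header): a caller combines the
+ * access and placement bits above, passing the wanted values in flags and
+ * the bits it cares about in mask, e.g. via drm_bo_do_validate(), whose
+ * prototype appears further down in this diff:
+ *
+ *	ret = drm_bo_do_validate(bo,
+ *				 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ *				 DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
+ *				 DRM_BO_HINT_DONT_BLOCK, 0);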
+ */ /* - * Mask: Never evict this buffer. Not even with force. This type of buffer is only + * available to root and must be manually removed before buffer manager shutdown + * or lock. + * Flags: Acknowledge */ +#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) -struct drm_ref_object { - struct drm_hash_item hash; - struct list_head list; - atomic_t refcount; - enum drm_ref_type unref_action; -}; +/* + * Mask: Require that the buffer is placed in mappable memory when validated. + * If not set the buffer may or may not be in mappable memory when validated. + * Flags: If set, the buffer is in mappable memory. + */ +#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) -/** - * Must be called with the struct_mutex held. +/* Mask: The buffer should be shareable with other processes. * Flags: The buffer is shareable with other processes. */ +#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) -extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, - int shareable); -/** - * Must be called with the struct_mutex held. +/* Mask: If set, place the buffer in cache-coherent memory if available. + * If clear, never place the buffer in cache coherent memory if validated. + * Flags: The buffer is currently in cache-coherent memory. + */ +#define DRM_BO_FLAG_CACHED (1ULL << 7) + +/* Mask: Make sure that every time this buffer is validated, + * it ends up at the same location provided that the memory mask is the same. + * The buffer will also not be evicted when claiming space for + * other buffers. Basically a pinned buffer but it may be thrown out as + * part of buffer manager shutdown or locking. + * Flags: Acknowledge. */ +#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) + +/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction + * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART + * with unsnooped PTEs instead of snooped, by using chipset-specific cache + * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED, + * as the eviction to local memory (TTM unbind) on map is just a side effect + * to prevent aggressive cache prefetch from the GPU disturbing the cache + * management that the DRM is doing. + * + * Flags: Acknowledge. + * Buffers allocated with this flag should not be used for suballocators. + * This type may have issues on CPUs with over-aggressive caching + * http://marc.info/?l=linux-kernel&m=102376926732464&w=2 + */ +#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19) + + +/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) /* - * Must be called with the struct_mutex held. May temporarily release it. + * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. + * Flags: Acknowledge. */ +#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) +#define DRM_BO_FLAG_TILE (1ULL << 15) -extern int drm_add_ref_object(struct drm_file *priv, - struct drm_user_object *referenced_object, - enum drm_ref_type ref_action); +/* + * Memory type flags that can be or'ed together in the mask, but only + * one appears in flags.
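+ *
+ * For example (illustration only): a mask of DRM_BO_FLAG_MEM_TT |
+ * DRM_BO_FLAG_MEM_VRAM lets validation pick either placement, while the
+ * returned flags word carries exactly one of the two, say
+ * DRM_BO_FLAG_MEM_TT once the buffer is bound there.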
+ */ + +/* System memory */ +#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) +/* Translation table memory */ +#define DRM_BO_FLAG_MEM_TT (1ULL << 25) +/* Vram memory */ +#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) +/* Up to the driver to define. */ +#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) +#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) +#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) +#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) +#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) +/* We can add more of these now with a 64-bit flag type */ /* - * This is a mask covering all of the memory type flags; easier to just + * use a single constant than a bunch of | values. It covers + * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4 + */ +#define DRM_BO_MASK_MEM 0x00000000FF000000ULL +/* + * This adds all of the CPU-mapping options in with the memory + * type to label all bits which change how the page gets mapped */ +#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \ + DRM_BO_FLAG_CACHED_MAPPED | \ + DRM_BO_FLAG_CACHED | \ + DRM_BO_FLAG_MAPPABLE) + +/* Driver-private flags */ +#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL -struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, - struct drm_user_object *referenced_object, - enum drm_ref_type ref_action); /* - * Must be called with the struct_mutex held. - * If "item" has been obtained by a call to drm_lookup_ref_object. You may not - * release the struct_mutex before calling drm_remove_ref_object. - * This function may temporarily release the struct_mutex. + * Don't block on validate and map. Instead, return EBUSY. + */ +#define DRM_BO_HINT_DONT_BLOCK 0x00000002 +/* + * Don't place this buffer on the unfenced list. This means + * that the buffer will not end up having a fence associated + * with it as a result of this operation + */ +#define DRM_BO_HINT_DONT_FENCE 0x00000004 +/** + * On hardware with no interrupt events for operation completion, + * indicates that the kernel should sleep while waiting for any blocking + * operation to complete rather than spinning. + * + * Has no effect otherwise. + */ +#define DRM_BO_HINT_WAIT_LAZY 0x00000008 +/* + * The client has computed relocations referring to this buffer using the + * offset in the presumed_offset field. If that offset ends up matching + * where this buffer lands, the kernel is free to skip executing those + * relocations. */ +#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010 + + +#define DRM_BO_MEM_LOCAL 0 +#define DRM_BO_MEM_TT 1 +#define DRM_BO_MEM_VRAM 2 +#define DRM_BO_MEM_PRIV0 3 +#define DRM_BO_MEM_PRIV1 4 +#define DRM_BO_MEM_PRIV2 5 +#define DRM_BO_MEM_PRIV3 6 +#define DRM_BO_MEM_PRIV4 7 + +#define DRM_BO_MEM_TYPES 8 /* For now. */ + +#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) +#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) -extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item); -extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, - enum drm_object_type type, - struct drm_user_object **object); -extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, - enum drm_object_type type); /*************************************************** * Fence objects.
(drm_fence.c) */ struct drm_fence_object { - struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -470,7 +561,6 @@ enum drm_bo_type { struct drm_buffer_object { struct drm_device *dev; - struct drm_user_object base; /* * If there is a possibility that the usage variable is zero, @@ -546,7 +636,7 @@ struct drm_mem_type_manager { }; struct drm_bo_lock { - struct drm_user_object base; + // struct drm_user_object base; wait_queue_head_t queue; atomic_t write_lock_pending; atomic_t readers; @@ -655,22 +745,10 @@ struct drm_bo_driver { /* * buffer objects (drm_bo.c) */ -extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class); extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin); -extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, @@ -707,18 +785,9 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_c extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, unsigned long p_offset, unsigned long p_size, int kern_init); -extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, - uint64_t flags, uint64_t mask, uint32_t hint, - uint32_t fence_class, - struct drm_bo_info_rep *rep, - struct drm_buffer_object **bo_rep); extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, uint32_t handle, int check_owner); -extern int drm_bo_do_validate(struct drm_buffer_object *bo, - uint64_t flags, uint64_t mask, uint32_t hint, - uint32_t fence_class, - struct drm_bo_info_rep *rep); extern int drm_bo_evict_cached(struct drm_buffer_object *bo); extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); @@ -766,8 +835,6 @@ extern int drm_bo_pfn_prot(struct drm_buffer_object *bo, unsigned long dst_offset, unsigned long *pfn, pgprot_t *prot); -extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, - struct drm_bo_info_rep *rep); /* @@ -812,23 +879,6 
@@ extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * m void **virtual); extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem, void *virtual); -/* - * drm_bo_lock.c - * Simple replacement for the hardware lock on buffer manager init and clean. - */ - - -extern void drm_bo_init_lock(struct drm_bo_lock *lock); -extern void drm_bo_read_unlock(struct drm_bo_lock *lock); -extern int drm_bo_read_lock(struct drm_bo_lock *lock, - int interruptible); -extern int drm_bo_write_lock(struct drm_bo_lock *lock, - int interruptible, - struct drm_file *file_priv); - -extern int drm_bo_write_unlock(struct drm_bo_lock *lock, - struct drm_file *file_priv); - #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ BUG_ON(!mutex_is_locked(_mutex) || \ diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 1676b2a9..1257b0d4 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -88,30 +88,7 @@ again: return new_id; } -int drm_setmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - if (file_priv->minor->master && file_priv->minor->master != file_priv->master) - return -EINVAL; - - if (!file_priv->master) - return -EINVAL; - - if (!file_priv->minor->master && file_priv->minor->master != file_priv->master) - file_priv->minor->master = file_priv->master; - return 0; -} - -int drm_dropmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - if (!file_priv->master) - return -EINVAL; - file_priv->minor->master = NULL; - return 0; -} - -struct drm_master *drm_get_master(struct drm_minor *minor) +struct drm_master *drm_master_create(struct drm_minor *minor) { struct drm_master *master; @@ -119,7 +96,7 @@ struct drm_master *drm_get_master(struct drm_minor *minor) if (!master) return NULL; -// INIT_LIST_HEAD(&master->filelist); + kref_init(&master->refcount); spin_lock_init(&master->lock.spinlock); init_waitqueue_head(&master->lock.lock_queue); drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); @@ -131,8 +108,15 @@ struct drm_master *drm_get_master(struct drm_minor *minor) return master; } -void drm_put_master(struct drm_master *master) +struct drm_master *drm_master_get(struct drm_master *master) { + kref_get(&master->refcount); + return master; +} + +static void drm_master_destroy(struct kref *kref) +{ + struct drm_master *master = container_of(kref, struct drm_master, refcount); struct drm_magic_entry *pt, *next; struct drm_device *dev = master->minor->dev; @@ -166,21 +150,56 @@ void drm_put_master(struct drm_master *master) drm_free(master, sizeof(*master), DRM_MEM_DRIVER); } +void drm_master_put(struct drm_master **master) +{ + kref_put(&(*master)->refcount, drm_master_destroy); + *master = NULL; +} + +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + if (file_priv->minor->master && file_priv->minor->master != file_priv->master) + return -EINVAL; + + if (!file_priv->master) + return -EINVAL; + + if (!file_priv->minor->master && file_priv->minor->master != file_priv->master) { + mutex_lock(&dev->struct_mutex); + file_priv->minor->master = drm_master_get(file_priv->master); + mutex_unlock(&dev->struct_mutex); + } + + return 0; +} + +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + if (!file_priv->master) + return -EINVAL; + mutex_lock(&dev->struct_mutex); + drm_master_put(&file_priv->minor->master); + mutex_unlock(&dev->struct_mutex); + return 0; +} +
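/*
 * A minimal sketch of the kref-based master lifetime introduced above
 * (editorial illustration; example_master_lifetime() is hypothetical,
 * while the drm_master_* helpers are the ones added in this hunk):
 */
static void example_master_lifetime(struct drm_minor *minor,
				    struct drm_file *priv)
{
	/* the minor holds the first reference... */
	minor->master = drm_master_create(minor);
	if (!minor->master)
		return;		/* allocation failed */

	/* ...and each file_priv takes its own */
	priv->master = drm_master_get(minor->master);

	/* every holder drops only its own reference; drm_master_put()
	 * also clears the caller's pointer */
	drm_master_put(&priv->master);
	drm_master_put(&minor->master);	/* last put runs drm_master_destroy() */
}

static int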
drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { int retcode; + INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); INIT_LIST_HEAD(&dev->maplist); - INIT_LIST_HEAD(&dev->filelist); spin_lock_init(&dev->count_lock); spin_lock_init(&dev->drw_lock); spin_lock_init(&dev->tasklet_lock); -// spin_lock_init(&dev->lock.spinlock); + init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); @@ -206,12 +225,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, return -ENOMEM; } - if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) { - drm_ht_remove(&dev->map_hash); - drm_mm_takedown(&dev->offset_manager); - return -ENOMEM; - } - /* the DRM has 6 counters */ dev->counters = 6; dev->types[0] = _DRM_STAT_LOCK; @@ -407,10 +420,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, return 0; err_g5: - drm_put_minor(dev, &dev->primary); + drm_put_minor(&dev->primary); err_g4: if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_put_minor(dev, &dev->control); + drm_put_minor(&dev->control); err_g3: if (!drm_fb_loaded) pci_disable_device(pdev); @@ -461,14 +474,14 @@ int drm_put_dev(struct drm_device * dev) * last minor released. * */ -int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p) +int drm_put_minor(struct drm_minor **minor_p) { struct drm_minor *minor = *minor_p; DRM_DEBUG("release secondary minor %d\n", minor->index); if (minor->type == DRM_MINOR_LEGACY) { - if (dev->driver->proc_cleanup) - dev->driver->proc_cleanup(minor); + if (minor->dev->driver->proc_cleanup) + minor->dev->driver->proc_cleanup(minor); drm_proc_cleanup(minor, drm_proc_root); } drm_sysfs_device_remove(minor); diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index d4d97a4d..0aabf943 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -715,13 +715,9 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long ret = VM_FAULT_NOPAGE; dev = bo->dev; - err = drm_bo_read_lock(&dev->bm.bm_lock, 1); - if (err) - return VM_FAULT_NOPAGE; err = mutex_lock_interruptible(&bo->mutex); if (err) { - drm_bo_read_unlock(&dev->bm.bm_lock); return VM_FAULT_NOPAGE; } @@ -788,7 +784,6 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma, out_unlock: BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); mutex_unlock(&bo->mutex); - drm_bo_read_unlock(&dev->bm.bm_lock); return ret; } #endif diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c deleted file mode 100644 index 4224b737..00000000 --- a/linux-core/i915_buffer.c +++ /dev/null @@ -1,311 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * - **************************************************************************/ -/* - * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> - */ - -#include "drmP.h" -#include "i915_drm.h" -#include "i915_drv.h" - -struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev) -{ - return drm_agp_init_ttm(dev); -} - -int i915_fence_type(struct drm_buffer_object *bo, - uint32_t *fclass, - uint32_t *type) -{ - if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) - *type = 3; - else - *type = 1; - return 0; -} - -int i915_invalidate_caches(struct drm_device *dev, uint64_t flags) -{ - /* - * FIXME: Only emit once per batchbuffer submission. - */ - - uint32_t flush_cmd = MI_NO_WRITE_FLUSH; - - if (flags & DRM_BO_FLAG_READ) - flush_cmd |= MI_READ_FLUSH; - if (flags & DRM_BO_FLAG_EXE) - flush_cmd |= MI_EXE_FLUSH; - - return i915_emit_mi_flush(dev, flush_cmd); -} - -int i915_init_mem_type(struct drm_device *dev, uint32_t type, - struct drm_mem_type_manager *man) -{ - switch (type) { - case DRM_BO_MEM_LOCAL: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED; - man->drm_bus_maptype = 0; - man->gpu_offset = 0; - break; - case DRM_BO_MEM_TT: - if (!(drm_core_has_AGP(dev) && dev->agp)) { - DRM_ERROR("AGP is not enabled for memory type %u\n", - (unsigned)type); - return -EINVAL; - } - man->io_offset = dev->agp->agp_info.aper_base; - man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; - man->io_addr = NULL; - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; - man->drm_bus_maptype = _DRM_AGP; - man->gpu_offset = 0; - break; - case DRM_BO_MEM_VRAM: - if (!(drm_core_has_AGP(dev) && dev->agp)) { - DRM_ERROR("AGP is not enabled for memory type %u\n", - (unsigned)type); - return -EINVAL; - } - man->io_offset = dev->agp->agp_info.aper_base; - man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; - man->io_addr = NULL; - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; - man->drm_bus_maptype = _DRM_AGP; - man->gpu_offset = 0; - break; - case DRM_BO_MEM_PRIV0: /* for OS preallocated space */ - DRM_ERROR("PRIV0 not used yet.\n"); - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); - return -EINVAL; - } - return 0; -} - -/* - * i915_evict_flags: - * - * @bo: the buffer object to be evicted - * - * Return the bo flags for a buffer which is not mapped to the hardware. 
- * These will be placed in proposed_flags so that when the move is - * finished, they'll end up in bo->mem.flags - */ -uint64_t i915_evict_flags(struct drm_buffer_object *bo) -{ - switch (bo->mem.mem_type) { - case DRM_BO_MEM_LOCAL: - case DRM_BO_MEM_TT: - return DRM_BO_FLAG_MEM_LOCAL; - default: - return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; - } -} - -#if 0 /* See comment below */ - -static void i915_emit_copy_blit(struct drm_device * dev, - uint32_t src_offset, - uint32_t dst_offset, - uint32_t pages, int direction) -{ - uint32_t cur_pages; - uint32_t stride = PAGE_SIZE; - struct drm_i915_private *dev_priv = dev->dev_private; - RING_LOCALS; - - if (!dev_priv) - return; - - i915_kernel_lost_context(dev); - while (pages > 0) { - cur_pages = pages; - if (cur_pages > 2048) - cur_pages = 2048; - pages -= cur_pages; - - BEGIN_LP_RING(6); - OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB); - OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) | - (1 << 25) | (direction ? (1 << 30) : 0)); - OUT_RING((cur_pages << 16) | PAGE_SIZE); - OUT_RING(dst_offset); - OUT_RING(stride & 0xffff); - OUT_RING(src_offset); - ADVANCE_LP_RING(); - } - return; -} - -static int i915_move_blit(struct drm_buffer_object * bo, - int evict, int no_wait, struct drm_bo_mem_reg * new_mem) -{ - struct drm_bo_mem_reg *old_mem = &bo->mem; - int dir = 0; - - if ((old_mem->mem_type == new_mem->mem_type) && - (new_mem->mm_node->start < - old_mem->mm_node->start + old_mem->mm_node->size)) { - dir = 1; - } - - i915_emit_copy_blit(bo->dev, - old_mem->mm_node->start << PAGE_SHIFT, - new_mem->mm_node->start << PAGE_SHIFT, - new_mem->num_pages, dir); - - i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH); - - return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0, - DRM_FENCE_TYPE_EXE | - DRM_I915_FENCE_TYPE_RW, - DRM_I915_FENCE_FLAG_FLUSHED, new_mem); -} - -/* - * Flip destination ttm into cached-coherent AGP, - * then blit and subsequently move out again. - */ - -static int i915_move_flip(struct drm_buffer_object * bo, - int evict, int no_wait, struct drm_bo_mem_reg * new_mem) -{ - struct drm_device *dev = bo->dev; - struct drm_bo_mem_reg tmp_mem; - int ret; - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - tmp_mem.mask = DRM_BO_FLAG_MEM_TT | - DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING; - - ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); - if (ret) - return ret; - - ret = drm_bind_ttm(bo->ttm, &tmp_mem); - if (ret) - goto out_cleanup; - - ret = i915_move_blit(bo, 1, no_wait, &tmp_mem); - if (ret) - goto out_cleanup; - - ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); -out_cleanup: - if (tmp_mem.mm_node) { - mutex_lock(&dev->struct_mutex); - if (tmp_mem.mm_node != bo->pinned_node) - drm_mm_put_block(tmp_mem.mm_node); - tmp_mem.mm_node = NULL; - mutex_unlock(&dev->struct_mutex); - } - return ret; -} - -#endif - -/* - * Disable i915_move_flip for now, since we can't guarantee that the hardware - * lock is held here. To re-enable we need to make sure either - * a) The X server is using DRM to submit commands to the ring, or - * b) DRM can use the HP ring for these blits. This means i915 needs to - * implement a new ring submission mechanism and fence class. 
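- *
- * (Until that is in place, i915_move() below simply falls back to
- * drm_bo_move_memcpy() on every path.)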
- */ -int i915_move(struct drm_buffer_object *bo, - int evict, int no_wait, struct drm_bo_mem_reg *new_mem) -{ - struct drm_bo_mem_reg *old_mem = &bo->mem; - - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/ - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } else { - if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/ - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - return 0; -} - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) -static inline void clflush(volatile void *__p) -{ - asm volatile("clflush %0" : "+m" (*(char __force *)__p)); -} -#endif - -static inline void drm_cache_flush_addr(void *virt) -{ -#ifdef cpu_has_clflush - int i; - - for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) - clflush(virt+i); -#endif -} - -static inline void drm_cache_flush_page(struct page *p) -{ - drm_cache_flush_addr(page_address(p)); -} - -void i915_flush_ttm(struct drm_ttm *ttm) -{ - int i; - - if (!ttm) - return; - - DRM_MEMORYBARRIER(); - -#ifdef CONFIG_X86_32 -#ifndef cpu_has_clflush -#define cpu_has_clflush 0 -#endif - /* Hopefully nobody has built an x86-64 processor without clflush */ - if (!cpu_has_clflush) { - wbinvd(); - DRM_MEMORYBARRIER(); - return; - } -#endif - - for (i = ttm->num_pages - 1; i >= 0; i--) - drm_cache_flush_page(drm_ttm_get_page(ttm, i)); - - DRM_MEMORYBARRIER(); -} diff --git a/linux-core/i915_execbuf.c b/linux-core/i915_execbuf.c deleted file mode 100644 index 932882dd..00000000 --- a/linux-core/i915_execbuf.c +++ /dev/null @@ -1,921 +0,0 @@ -/* - * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. - * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: - * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> - * Dave Airlie - * Keith Packard - * ... ? 
- */ - -#include "drmP.h" -#include "drm.h" -#include "i915_drm.h" -#include "i915_drv.h" - -#if DRM_DEBUG_CODE -#define DRM_DEBUG_RELOCATION (drm_debug != 0) -#else -#define DRM_DEBUG_RELOCATION 0 -#endif - -enum i915_buf_idle { - I915_RELOC_UNCHECKED, - I915_RELOC_IDLE, - I915_RELOC_BUSY -}; - -struct i915_relocatee_info { - struct drm_buffer_object *buf; - unsigned long offset; - uint32_t *data_page; - unsigned page_offset; - struct drm_bo_kmap_obj kmap; - int is_iomem; - int dst; - int idle; - int performed_ring_relocs; -#ifdef DRM_KMAP_ATOMIC_PROT_PFN - unsigned long pfn; - pgprot_t pg_prot; -#endif -}; - -struct drm_i915_validate_buffer { - struct drm_buffer_object *buffer; - int presumed_offset_correct; - void __user *data; - int ret; - enum i915_buf_idle idle; -}; - -/* - * I'd like to use MI_STORE_DATA_IMM here, but I can't make - * it work. Seems like GART writes are broken with that - * instruction. Also I'm not sure that MI_FLUSH will - * act as a memory barrier for that instruction. It will - * for this single dword 2D blit. - */ - -static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset, - uint32_t value) -{ - struct drm_i915_private *dev_priv = - (struct drm_i915_private *)dev->dev_private; - - RING_LOCALS; - i915_kernel_lost_context(dev); - BEGIN_LP_RING(6); - OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3)); - OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40)); - OUT_RING((0x1 << 16) | (0x4)); - OUT_RING(offset); - OUT_RING(value); - OUT_RING(0); - ADVANCE_LP_RING(); -} - -static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer - *buffers, unsigned num_buffers) -{ - while (num_buffers--) - drm_bo_usage_deref_locked(&buffers[num_buffers].buffer); -} - -int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, - struct drm_i915_validate_buffer *buffers, - struct i915_relocatee_info *relocatee, uint32_t * reloc) -{ - unsigned index; - unsigned long new_cmd_offset; - u32 val; - int ret, i; - int buf_index = -1; - - /* - * FIXME: O(relocs * buffers) complexity. 
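- *
- * (For reference: each relocation triggers the linear scan below,
- * matching reloc[2] against buffer->base.hash.key; keying a hash table
- * on the handle would reduce this to O(relocs).)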
- */ - - for (i = 0; i <= num_buffers; i++) - if (buffers[i].buffer) - if (reloc[2] == buffers[i].buffer->base.hash.key) - buf_index = i; - - if (buf_index == -1) { - DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); - return -EINVAL; - } - - /* - * Short-circuit relocations that were correctly - * guessed by the client - */ - if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION) - return 0; - - new_cmd_offset = reloc[0]; - if (!relocatee->data_page || - !drm_bo_same_page(relocatee->offset, new_cmd_offset)) { - struct drm_bo_mem_reg *mem = &relocatee->buf->mem; - - drm_bo_kunmap(&relocatee->kmap); - relocatee->data_page = NULL; - relocatee->offset = new_cmd_offset; - - if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) { - ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0); - if (ret) - return ret; - relocatee->idle = I915_RELOC_IDLE; - } - - if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) && - (mem->flags & DRM_BO_FLAG_CACHED_MAPPED))) - drm_bo_evict_cached(relocatee->buf); - - ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT, - 1, &relocatee->kmap); - if (ret) { - DRM_ERROR - ("Could not map command buffer to apply relocs\n %08lx", - new_cmd_offset); - return ret; - } - relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, - &relocatee->is_iomem); - relocatee->page_offset = (relocatee->offset & PAGE_MASK); - } - - val = buffers[buf_index].buffer->offset; - index = (reloc[0] - relocatee->page_offset) >> 2; - - /* add in validate */ - val = val + reloc[1]; - - if (DRM_DEBUG_RELOCATION) { - if (buffers[buf_index].presumed_offset_correct && - relocatee->data_page[index] != val) { - DRM_DEBUG - ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n", - reloc[0], reloc[1], buf_index, - relocatee->data_page[index], val); - } - } - - if (relocatee->is_iomem) - iowrite32(val, relocatee->data_page + index); - else - relocatee->data_page[index] = val; - return 0; -} - -int i915_process_relocs(struct drm_file *file_priv, - uint32_t buf_handle, - uint32_t __user ** reloc_user_ptr, - struct i915_relocatee_info *relocatee, - struct drm_i915_validate_buffer *buffers, - uint32_t num_buffers) -{ - int ret, reloc_stride; - uint32_t cur_offset; - uint32_t reloc_count; - uint32_t reloc_type; - uint32_t reloc_buf_size; - uint32_t *reloc_buf = NULL; - int i; - - /* do a copy from user from the user ptr */ - ret = get_user(reloc_count, *reloc_user_ptr); - if (ret) { - DRM_ERROR("Could not map relocation buffer.\n"); - goto out; - } - - ret = get_user(reloc_type, (*reloc_user_ptr) + 1); - if (ret) { - DRM_ERROR("Could not map relocation buffer.\n"); - goto out; - } - - if (reloc_type != 0) { - DRM_ERROR("Unsupported relocation type requested\n"); - ret = -EINVAL; - goto out; - } - - reloc_buf_size = - (I915_RELOC_HEADER + - (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t); - reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); - if (!reloc_buf) { - DRM_ERROR("Out of memory for reloc buffer\n"); - ret = -ENOMEM; - goto out; - } - - if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) { - ret = -EFAULT; - goto out; - } - - /* get next relocate buffer handle */ - *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2]; - - reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ - - DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, - *reloc_user_ptr); - - for (i = 0; i < reloc_count; i++) { - cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE); - - ret = 
i915_apply_reloc(file_priv, num_buffers, buffers, - relocatee, reloc_buf + cur_offset); - if (ret) - goto out; - } - - out: - if (reloc_buf) - kfree(reloc_buf); - - if (relocatee->data_page) { - drm_bo_kunmap(&relocatee->kmap); - relocatee->data_page = NULL; - } - - return ret; -} - -static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, - uint32_t __user * reloc_user_ptr, - struct drm_i915_validate_buffer *buffers, - uint32_t buf_count) -{ - struct drm_device *dev = file_priv->minor->dev; - struct i915_relocatee_info relocatee; - int ret = 0; - int b; - - /* - * Short circuit relocations when all previous - * buffers offsets were correctly guessed by - * the client - */ - if (!DRM_DEBUG_RELOCATION) { - for (b = 0; b < buf_count; b++) - if (!buffers[b].presumed_offset_correct) - break; - - if (b == buf_count) - return 0; - } - - memset(&relocatee, 0, sizeof(relocatee)); - relocatee.idle = I915_RELOC_UNCHECKED; - - mutex_lock(&dev->struct_mutex); - relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); - mutex_unlock(&dev->struct_mutex); - if (!relocatee.buf) { - DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); - ret = -EINVAL; - goto out_err; - } - - mutex_lock(&relocatee.buf->mutex); - while (reloc_user_ptr) { - ret = - i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, - &relocatee, buffers, buf_count); - if (ret) { - DRM_ERROR("process relocs failed\n"); - goto out_err1; - } - } - - out_err1: - mutex_unlock(&relocatee.buf->mutex); - drm_bo_usage_deref_unlocked(&relocatee.buf); - out_err: - return ret; -} - -static void i915_clear_relocatee(struct i915_relocatee_info *relocatee) -{ - if (relocatee->data_page) { -#ifndef DRM_KMAP_ATOMIC_PROT_PFN - drm_bo_kunmap(&relocatee->kmap); -#else - kunmap_atomic(relocatee->data_page, KM_USER0); -#endif - relocatee->data_page = NULL; - } - relocatee->buf = NULL; - relocatee->dst = ~0; -} - -static int i915_update_relocatee(struct i915_relocatee_info *relocatee, - struct drm_i915_validate_buffer *buffers, - unsigned int dst, unsigned long dst_offset) -{ - int ret; - - if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) { - i915_clear_relocatee(relocatee); - relocatee->dst = dst; - relocatee->buf = buffers[dst].buffer; - relocatee->idle = buffers[dst].idle; - - /* - * Check for buffer idle. If the buffer is busy, revert to - * ring relocations. 
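- *
- * (For reference: the wait below may sleep in drm_bo_wait(), so the
- * caller's preempt_disable() section is exited around it and re-entered
- * afterwards.)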
- */ - - if (relocatee->idle == I915_RELOC_UNCHECKED) { - preempt_enable(); - mutex_lock(&relocatee->buf->mutex); - - ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0); - if (ret == 0) - relocatee->idle = I915_RELOC_IDLE; - else { - relocatee->idle = I915_RELOC_BUSY; - relocatee->performed_ring_relocs = 1; - } - mutex_unlock(&relocatee->buf->mutex); - preempt_disable(); - buffers[dst].idle = relocatee->idle; - } - } - - if (relocatee->idle == I915_RELOC_BUSY) - return 0; - - if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) { - DRM_ERROR("Relocation destination out of bounds.\n"); - return -EINVAL; - } - if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) || - NULL == relocatee->data_page)) { -#ifdef DRM_KMAP_ATOMIC_PROT_PFN - if (NULL != relocatee->data_page) { - kunmap_atomic(relocatee->data_page, KM_USER0); - relocatee->data_page = NULL; - } - ret = drm_bo_pfn_prot(relocatee->buf, dst_offset, - &relocatee->pfn, &relocatee->pg_prot); - if (ret) { - DRM_ERROR("Can't map relocation destination.\n"); - return -EINVAL; - } - relocatee->data_page = - kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0, - relocatee->pg_prot); -#else - if (NULL != relocatee->data_page) { - drm_bo_kunmap(&relocatee->kmap); - relocatee->data_page = NULL; - } - - ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT, - 1, &relocatee->kmap); - if (ret) { - DRM_ERROR("Can't map relocation destination.\n"); - return ret; - } - - relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, - &relocatee->is_iomem); -#endif - relocatee->page_offset = dst_offset & PAGE_MASK; - } - return 0; -} - -static int i915_apply_post_reloc(uint32_t reloc[], - struct drm_i915_validate_buffer *buffers, - uint32_t num_buffers, - struct i915_relocatee_info *relocatee) -{ - uint32_t reloc_buffer = reloc[2]; - uint32_t dst_buffer = reloc[3]; - uint32_t val; - uint32_t index; - int ret; - - if (likely(buffers[reloc_buffer].presumed_offset_correct)) - return 0; - if (unlikely(reloc_buffer >= num_buffers)) { - DRM_ERROR("Invalid reloc buffer index.\n"); - return -EINVAL; - } - if (unlikely(dst_buffer >= num_buffers)) { - DRM_ERROR("Invalid dest buffer index.\n"); - return -EINVAL; - } - - ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]); - if (unlikely(ret)) - return ret; - - val = buffers[reloc_buffer].buffer->offset; - index = (reloc[0] - relocatee->page_offset) >> 2; - val = val + reloc[1]; - - if (relocatee->idle == I915_RELOC_BUSY) { - i915_emit_ring_reloc(relocatee->buf->dev, - relocatee->buf->offset + reloc[0], val); - return 0; - } -#ifdef DRM_KMAP_ATOMIC_PROT_PFN - relocatee->data_page[index] = val; -#else - if (likely(relocatee->is_iomem)) - iowrite32(val, relocatee->data_page + index); - else - relocatee->data_page[index] = val; -#endif - - return 0; -} - -static int i915_post_relocs(struct drm_file *file_priv, - uint32_t __user * new_reloc_ptr, - struct drm_i915_validate_buffer *buffers, - unsigned int num_buffers) -{ - uint32_t *reloc; - uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); - uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t); - struct i915_relocatee_info relocatee; - uint32_t reloc_type; - uint32_t num_relocs; - uint32_t count; - int ret = 0; - int i; - int short_circuit = 1; - uint32_t __user *reloc_ptr; - uint64_t new_reloc_data; - uint32_t reloc_buf_size; - uint32_t *reloc_buf; - - for (i = 0; i < num_buffers; ++i) { - if (unlikely(!buffers[i].presumed_offset_correct)) { - short_circuit = 0; - break; - } - } - - if (likely(short_circuit)) - return 
0; - - memset(&relocatee, 0, sizeof(relocatee)); - - while (new_reloc_ptr) { - reloc_ptr = new_reloc_ptr; - - ret = get_user(num_relocs, reloc_ptr); - if (unlikely(ret)) - goto out; - if (unlikely(!access_ok(VERIFY_READ, reloc_ptr, - header_size + - num_relocs * reloc_stride))) - return -EFAULT; - - ret = __get_user(reloc_type, reloc_ptr + 1); - if (unlikely(ret)) - goto out; - - if (unlikely(reloc_type != 1)) { - DRM_ERROR("Unsupported relocation type requested.\n"); - ret = -EINVAL; - goto out; - } - - ret = __get_user(new_reloc_data, reloc_ptr + 2); - new_reloc_ptr = (uint32_t __user *) (unsigned long) - new_reloc_data; - - reloc_ptr += I915_RELOC_HEADER; - - if (num_relocs == 0) - goto out; - - reloc_buf_size = - (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t); - reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); - if (!reloc_buf) { - DRM_ERROR("Out of memory for reloc buffer\n"); - ret = -ENOMEM; - goto out; - } - - if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) { - ret = -EFAULT; - goto out; - } - reloc = reloc_buf; - preempt_disable(); - for (count = 0; count < num_relocs; ++count) { - ret = i915_apply_post_reloc(reloc, buffers, - num_buffers, &relocatee); - if (unlikely(ret)) { - preempt_enable(); - goto out; - } - reloc += I915_RELOC0_STRIDE; - } - preempt_enable(); - - if (reloc_buf) { - kfree(reloc_buf); - reloc_buf = NULL; - } - i915_clear_relocatee(&relocatee); - } - - out: - /* - * Flush ring relocs so the command parser will pick them up. - */ - - if (relocatee.performed_ring_relocs) - (void)i915_emit_mi_flush(file_priv->minor->dev, 0); - - i915_clear_relocatee(&relocatee); - if (reloc_buf) { - kfree(reloc_buf); - reloc_buf = NULL; - } - - return ret; -} - -static int i915_check_presumed(struct drm_i915_op_arg *arg, - struct drm_buffer_object *bo, - uint32_t __user * data, int *presumed_ok) -{ - struct drm_bo_op_req *req = &arg->d.req; - uint32_t hint_offset; - uint32_t hint = req->bo_req.hint; - - *presumed_ok = 0; - - if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET)) - return 0; - if (bo->offset == req->bo_req.presumed_offset) { - *presumed_ok = 1; - return 0; - } - - /* - * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in - * the user-space IOCTL argument list, since the buffer has moved, - * we're about to apply relocations and we might subsequently - * hit an -EAGAIN. In that case the argument list will be reused by - * user-space, but the presumed offset is no longer valid. - * - * Needless to say, this is a bit ugly. 
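- *
- * Concretely (an illustrative example): if user space guessed
- * presumed_offset == 0x10000 but validation placed the buffer at
- * 0x20000, the hint bit is cleared in the copied-back argument list so
- * that a rerun after -EAGAIN applies the now-necessary relocations
- * instead of skipping them.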
- */ - - hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg; - hint &= ~DRM_BO_HINT_PRESUMED_OFFSET; - return __put_user(hint, data + hint_offset); -} - -/* - * Validate, add fence and relocate a block of bos from a userspace list - */ -int i915_validate_buffer_list(struct drm_file *file_priv, - unsigned int fence_class, uint64_t data, - struct drm_i915_validate_buffer *buffers, - uint32_t * num_buffers, - uint32_t __user ** post_relocs) -{ - struct drm_i915_op_arg arg; - struct drm_bo_op_req *req = &arg.d.req; - int ret = 0; - unsigned buf_count = 0; - uint32_t buf_handle; - uint32_t __user *reloc_user_ptr; - struct drm_i915_validate_buffer *item = buffers; - *post_relocs = NULL; - - do { - if (buf_count >= *num_buffers) { - DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers); - ret = -EINVAL; - goto out_err; - } - item = buffers + buf_count; - item->buffer = NULL; - item->presumed_offset_correct = 0; - item->idle = I915_RELOC_UNCHECKED; - - if (copy_from_user - (&arg, (void __user *)(unsigned long)data, sizeof(arg))) { - ret = -EFAULT; - goto out_err; - } - - ret = 0; - if (req->op != drm_bo_validate) { - DRM_ERROR - ("Buffer object operation wasn't \"validate\".\n"); - ret = -EINVAL; - goto out_err; - } - item->ret = 0; - item->data = (void __user *)(unsigned long)data; - - buf_handle = req->bo_req.handle; - reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr; - - /* - * Switch mode to post-validation relocations? - */ - - if (unlikely((buf_count == 0) && (*post_relocs == NULL) && - (reloc_user_ptr != NULL))) { - uint32_t reloc_type; - - ret = get_user(reloc_type, reloc_user_ptr + 1); - if (ret) - goto out_err; - - if (reloc_type == 1) - *post_relocs = reloc_user_ptr; - - } - - if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) { - ret = - i915_exec_reloc(file_priv, buf_handle, - reloc_user_ptr, buffers, buf_count); - if (ret) - goto out_err; - DRM_MEMORYBARRIER(); - } - - ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, - req->bo_req.flags, - req->bo_req.mask, req->bo_req.hint, - req->bo_req.fence_class, - NULL, &item->buffer); - if (ret) { - DRM_ERROR("error on handle validate %d\n", ret); - goto out_err; - } - - buf_count++; - - ret = i915_check_presumed(&arg, item->buffer, - (uint32_t __user *) - (unsigned long)data, - &item->presumed_offset_correct); - if (ret) - goto out_err; - - data = arg.next; - } while (data != 0); - out_err: - *num_buffers = buf_count; - item->ret = (ret != -EAGAIN) ? ret : 0; - return ret; -} - -/* - * Remove all buffers from the unfenced list. - * If the execbuffer operation was aborted, for example due to a signal, - * this also make sure that buffers retain their original state and - * fence pointers. - * Copy back buffer information to user-space unless we were interrupted - * by a signal. In which case the IOCTL must be rerun. 
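- *
- * (For reference: in the function below, -EAGAIN leaves the user
- * argument list untouched so the rerun sees its original state, while
- * any other outcome copies the per-buffer return codes back with
- * __copy_to_user().)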
-
-/*
- * Remove all buffers from the unfenced list.
- * If the execbuffer operation was aborted, for example due to a signal,
- * this also make sure that buffers retain their original state and
- * fence pointers.
- * Copy back buffer information to user-space unless we were interrupted
- * by a signal. In which case the IOCTL must be rerun.
- */
-
-static int i915_handle_copyback(struct drm_device *dev,
-				struct drm_i915_validate_buffer *buffers,
-				unsigned int num_buffers, int ret)
-{
-	int err = ret;
-	int i;
-	struct drm_i915_op_arg arg;
-	struct drm_buffer_object *bo;
-
-	if (ret)
-		drm_putback_buffer_objects(dev);
-
-	if (ret != -EAGAIN) {
-		for (i = 0; i < num_buffers; ++i) {
-			arg.handled = 1;
-			arg.d.rep.ret = buffers->ret;
-			bo = buffers->buffer;
-			mutex_lock(&bo->mutex);
-			drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
-			mutex_unlock(&bo->mutex);
-			if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
-				err = -EFAULT;
-			buffers++;
-		}
-	}
-
-	return err;
-}
-
-/*
- * Create a fence object, and if that fails, pretend that everything is
- * OK and just idle the GPU.
- */
-
-void i915_fence_or_sync(struct drm_file *file_priv,
-			uint32_t fence_flags,
-			struct drm_fence_arg *fence_arg,
-			struct drm_fence_object **fence_p)
-{
-	struct drm_device *dev = file_priv->minor->dev;
-	int ret;
-	struct drm_fence_object *fence;
-
-	ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
-
-	if (ret) {
-
-		/*
-		 * Fence creation failed.
-		 * Fall back to synchronous operation and idle the engine.
-		 */
-
-		(void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
-		(void)i915_quiescent(dev);
-
-		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-
-			/*
-			 * Communicate to user-space that
-			 * fence creation has failed and that
-			 * the engine is idle.
-			 */
-
-			fence_arg->handle = ~0;
-			fence_arg->error = ret;
-		}
-		drm_putback_buffer_objects(dev);
-		if (fence_p)
-			*fence_p = NULL;
-		return;
-	}
-
-	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-
-		ret = drm_fence_add_user_object(file_priv, fence,
-						fence_flags &
-						DRM_FENCE_FLAG_SHAREABLE);
-		if (!ret)
-			drm_fence_fill_arg(fence, fence_arg);
-		else {
-			/*
-			 * Fence user object creation failed.
-			 * We must idle the engine here as well, as user-
-			 * space expects a fence object to wait on. Since we
-			 * have a fence object we wait for it to signal
-			 * to indicate engine "sufficiently" idle.
-			 */
-
-			(void)drm_fence_object_wait(fence, 0, 1, fence->type);
-			drm_fence_usage_deref_unlocked(&fence);
-			fence_arg->handle = ~0;
-			fence_arg->error = ret;
-		}
-	}
-
-	if (fence_p)
-		*fence_p = fence;
-	else if (fence)
-		drm_fence_usage_deref_unlocked(&fence);
-}
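i915_fence_or_sync() encodes a simple but important policy: if fence creation fails, the driver must not return with GPU work outstanding and nothing for user space to wait on, so it degrades to a synchronous flush-and-idle and reports handle ~0 plus an error code instead of a fence. As a pattern, hedged and stripped of the DRM specifics (the engine_*() helpers below are hypothetical stand-ins for the driver's flush/idle primitives):

/* Sketch of the fence-or-sync fallback. */
#include <stdint.h>

extern int create_fence(void **fence_out);
extern void engine_flush(void);
extern void engine_wait_idle(void);

struct sketch_fence_arg {
	uint32_t handle;
	int error;
};

static void fence_or_sync(struct sketch_fence_arg *arg, void **fence_p)
{
	void *fence;
	int ret = create_fence(&fence);

	if (ret) {
		/* No fence object: make completion synchronous instead. */
		engine_flush();
		engine_wait_idle();
		arg->handle = ~0u;	/* tell user space there is no fence */
		arg->error = ret;
		if (fence_p)
			*fence_p = NULL;
		return;
	}
	if (fence_p)
		*fence_p = fence;
}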
-
-int i915_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *)
-	    dev->dev_private;
-	struct drm_i915_master_private *master_priv =
-	    (struct drm_i915_master_private *)
-	    dev->primary->master->driver_priv;
-	struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
-	    master_priv->sarea_priv;
-	struct drm_i915_execbuffer *exec_buf = data;
-	struct drm_i915_batchbuffer *batch = &exec_buf->batch;
-	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
-	int num_buffers;
-	int ret;
-	uint32_t __user *post_relocs;
-
-	if (!dev_priv->allow_batchbuffer) {
-		DRM_ERROR("Batchbuffer ioctl disabled\n");
-		return -EINVAL;
-	}
-
-	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-							batch->num_cliprects *
-							sizeof(struct
-							       drm_clip_rect)))
-		return -EFAULT;
-
-	if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
-		return -EINVAL;
-
-	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
-	if (ret)
-		return ret;
-
-	/*
-	 * The cmdbuf_mutex makes sure the validate-submit-fence
-	 * operation is atomic.
-	 */
-
-	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-	if (ret) {
-		drm_bo_read_unlock(&dev->bm.bm_lock);
-		return -EAGAIN;
-	}
-
-	num_buffers = exec_buf->num_buffers;
-
-	if (!dev_priv->val_bufs) {
-		dev_priv->val_bufs =
-		    vmalloc(sizeof(struct drm_i915_validate_buffer) *
-			    dev_priv->max_validate_buffers);
-	}
-	if (!dev_priv->val_bufs) {
-		drm_bo_read_unlock(&dev->bm.bm_lock);
-		mutex_unlock(&dev_priv->cmdbuf_mutex);
-		return -ENOMEM;
-	}
-
-	/* validate buffer list + fixup relocations */
-	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
-					dev_priv->val_bufs, &num_buffers,
-					&post_relocs);
-	if (ret)
-		goto out_err0;
-
-	if (post_relocs) {
-		ret = i915_post_relocs(file_priv, post_relocs,
-				       dev_priv->val_bufs, num_buffers);
-		if (ret)
-			goto out_err0;
-	}
-
-	/* make sure all previous memory operations have passed */
-	DRM_MEMORYBARRIER();
-
-	if (!post_relocs) {
-		drm_agp_chipset_flush(dev);
-		batch->start =
-		    dev_priv->val_bufs[num_buffers - 1].buffer->offset;
-	} else {
-		batch->start += dev_priv->val_bufs[0].buffer->offset;
-	}
-
-	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
-		  batch->start, batch->used, batch->num_cliprects);
-
-	ret = i915_dispatch_batchbuffer(dev, batch);
-	if (ret)
-		goto out_err0;
-	if (sarea_priv)
-		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-	i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
-
- out_err0:
-	ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
-	mutex_lock(&dev->struct_mutex);
-	i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
-	mutex_unlock(&dev->struct_mutex);
-	mutex_unlock(&dev_priv->cmdbuf_mutex);
-	drm_bo_read_unlock(&dev->bm.bm_lock);
-	return ret;
-}
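i915_execbuffer() is also a compact example of lock ordering and unwind: the buffer-manager read lock is taken first, then cmdbuf_mutex (interruptibly, so a signal yields -EAGAIN and a user-space retry rather than a stuck task), and every failure path releases both in reverse order. Schematically, with placeholder lock helpers rather than the real DRM calls:

/* Sketch of the take/release discipline used above. */
extern int read_lock_bm(void);			/* shared outer lock */
extern void read_unlock_bm(void);
extern int lock_cmdbuf_interruptible(void);	/* inner mutex */
extern void unlock_cmdbuf(void);
extern int validate_and_dispatch(void);

static int sketch_execbuffer(void)
{
	int ret;

	ret = read_lock_bm();			/* 1. outer lock first */
	if (ret)
		return ret;

	ret = lock_cmdbuf_interruptible();	/* 2. then inner mutex */
	if (ret) {
		read_unlock_bm();
		return -EAGAIN;			/* signal: caller retries */
	}

	ret = validate_and_dispatch();	/* validate -> relocate -> dispatch */

	unlock_cmdbuf();			/* release in reverse order */
	read_unlock_bm();
	return ret;
}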
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
deleted file mode 100644
index 436b7e1f..00000000
--- a/linux-core/i915_fence.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/*
- * Initiate a sync flush if it's not already pending.
- */
-
-static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
-					 struct drm_fence_class_manager *fc)
-{
-	if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
-	    !dev_priv->flush_pending) {
-		dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
-		dev_priv->flush_flags = fc->pending_flush;
-		dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
-		I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-		dev_priv->flush_pending = 1;
-		fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
-	}
-}
-
-static inline void i915_report_rwflush(struct drm_device *dev,
-				       struct drm_i915_private *dev_priv)
-{
-	if (unlikely(dev_priv->flush_pending)) {
-
-		uint32_t flush_flags;
-		uint32_t i_status;
-		uint32_t flush_sequence;
-
-		i_status = READ_HWSP(dev_priv, 0);
-		if ((i_status & (1 << 12)) !=
-		    (dev_priv->saved_flush_status & (1 << 12))) {
-			flush_flags = dev_priv->flush_flags;
-			flush_sequence = dev_priv->flush_sequence;
-			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, 0, flush_sequence,
-					  flush_flags, 0);
-		}
-	}
-}
-
-static void i915_fence_flush(struct drm_device *dev,
-			     uint32_t fence_class)
-{
-	struct drm_i915_private *dev_priv =
-	    (struct drm_i915_private *) dev->dev_private;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->fence_class[0];
-	unsigned long irq_flags;
-
-	if (unlikely(!dev_priv))
-		return;
-
-	write_lock_irqsave(&fm->lock, irq_flags);
-	i915_initiate_rwflush(dev_priv, fc);
-	write_unlock_irqrestore(&fm->lock, irq_flags);
-}
-
-
-static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
-			    uint32_t waiting_types)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->fence_class[0];
-	uint32_t sequence;
-
-	if (unlikely(!dev_priv))
-		return;
-
-	/*
-	 * First, report any executed sync flush:
-	 */
-
-	i915_report_rwflush(dev, dev_priv);
-
-	/*
-	 * Report A new breadcrumb, and adjust IRQs.
-	 */
-
-	if (waiting_types & DRM_FENCE_TYPE_EXE) {
-
-		sequence = READ_BREADCRUMB(dev_priv);
-		drm_fence_handler(dev, 0, sequence,
-				  DRM_FENCE_TYPE_EXE, 0);
-
-		if (dev_priv->fence_irq_on &&
-		    !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
-			i915_user_irq_off(dev);
-			dev_priv->fence_irq_on = 0;
-		} else if (!dev_priv->fence_irq_on &&
-			   (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
-			i915_user_irq_on(dev);
-			dev_priv->fence_irq_on = 1;
-		}
-	}
-
-	/*
-	 * There may be new RW flushes pending. Start them.
-	 */
-
-	i915_initiate_rwflush(dev_priv, fc);
-
-	/*
-	 * And possibly, but unlikely, they finish immediately.
-	 */
-
-	i915_report_rwflush(dev, dev_priv);
-
-}
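The two inline helpers at the top of this deleted file implement a small handshake with the hardware: writing bits 5 and 21 of INSTPM requests a sync flush, and completion is detected by bit 12 of the hardware status page toggling relative to the value saved when the flush was issued. That state machine, reduced to a sketch with hypothetical register-access helpers in place of I915_WRITE()/READ_HWSP():

/* Sketch of the sync-flush handshake. */
#include <stdint.h>

extern void reg_write(uint32_t reg, uint32_t val);
extern uint32_t status_read(void);	/* hardware status page word */

static struct {
	int flush_pending;
	uint32_t saved_status;
} st;

static void initiate_flush(uint32_t instpm_reg)
{
	if (st.flush_pending)
		return;				/* one flush in flight at a time */
	st.saved_status = status_read();	/* remember pre-flush state */
	reg_write(instpm_reg, (1 << 5) | (1 << 21)); /* request sync flush */
	st.flush_pending = 1;
}

static int flush_done(void)
{
	/* bit 12 toggles once the flush has executed */
	return st.flush_pending &&
	       ((status_read() ^ st.saved_status) & (1 << 12));
}

Because completion is a toggle rather than a level, the driver has to latch the old status at issue time, which is exactly what saved_flush_status does above.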
-
-static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
-				    uint32_t flags, uint32_t *sequence,
-				    uint32_t *native_type)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
-	if (unlikely(!dev_priv))
-		return -EINVAL;
-
-	i915_emit_irq(dev);
-	*sequence = (uint32_t) dev_priv->counter;
-	*native_type = DRM_FENCE_TYPE_EXE;
-	if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
-		*native_type |= DRM_I915_FENCE_TYPE_RW;
-
-	return 0;
-}
-
-void i915_fence_handler(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->fence_class[0];
-
-	write_lock(&fm->lock);
-	if (likely(dev_priv->fence_irq_on))
-		i915_fence_poll(dev, 0, fc->waiting_types);
-	write_unlock(&fm->lock);
-}
-
-/*
- * We need a separate wait function since we need to poll for
- * sync flushes.
- */
-
-static int i915_fence_wait(struct drm_fence_object *fence,
-			   int lazy, int interruptible, uint32_t mask)
-{
-	struct drm_device *dev = fence->dev;
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->fence_class[0];
-	int ret;
-	unsigned long _end = jiffies + 3 * DRM_HZ;
-
-	drm_fence_object_flush(fence, mask);
-	if (likely(interruptible))
-		ret = wait_event_interruptible_timeout
-		    (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
-		     3 * DRM_HZ);
-	else
-		ret = wait_event_timeout
-		    (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
-		     3 * DRM_HZ);
-
-	if (unlikely(ret == -ERESTARTSYS))
-		return -EAGAIN;
-
-	if (unlikely(ret == 0))
-		return -EBUSY;
-
-	if (likely(mask == DRM_FENCE_TYPE_EXE ||
-		   drm_fence_object_signaled(fence, mask)))
-		return 0;
-
-	/*
-	 * Remove this code snippet when fixed. HWSTAM doesn't let
-	 * flush info through...
-	 */
-
-	if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
-		unsigned long irq_flags;
-
-		DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
-		msleep(100);
-		dev_priv->flush_pending = 0;
-		write_lock_irqsave(&fm->lock, irq_flags);
-		drm_fence_handler(dev, fence->fence_class,
-				  fence->sequence, fence->type, 0);
-		write_unlock_irqrestore(&fm->lock, irq_flags);
-	}
-
-	/*
-	 * Poll for sync flush completion.
- */ - - return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end); -} - -static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence) -{ - uint32_t flush_flags = fence->waiting_types & - ~(DRM_FENCE_TYPE_EXE | fence->signaled_types); - - if (likely(flush_flags == 0 || - ((flush_flags & ~fence->native_types) == 0) || - (fence->signaled_types != DRM_FENCE_TYPE_EXE))) - return 0; - else { - struct drm_device *dev = fence->dev; - struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; - struct drm_fence_driver *driver = dev->driver->fence_driver; - - if (unlikely(!dev_priv)) - return 0; - - if (dev_priv->flush_pending) { - uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & - driver->sequence_mask; - - if (diff < driver->wrap_diff) - return 0; - } - } - return flush_flags; -} - -struct drm_fence_driver i915_fence_driver = { - .num_classes = 1, - .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), - .flush_diff = (1U << (BREADCRUMB_BITS - 2)), - .sequence_mask = BREADCRUMB_MASK, - .has_irq = NULL, - .emit = i915_fence_emit_sequence, - .flush = i915_fence_flush, - .poll = i915_fence_poll, - .needed_flush = i915_fence_needed_flush, - .wait = i915_fence_wait, -}; diff --git a/linux-core/radeon_atombios.c b/linux-core/radeon_atombios.c index ee628732..eb482d92 100644 --- a/linux-core/radeon_atombios.c +++ b/linux-core/radeon_atombios.c @@ -62,12 +62,16 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio_for_ddc(struct drm_de i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4; i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4; i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4; + i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4; + i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4; i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift); i2c.mask_data_mask = (1 << gpio.ucDataMaskShift); i2c.put_clk_mask = (1 << gpio.ucClkEnShift); i2c.put_data_mask = (1 << gpio.ucDataEnShift); i2c.get_clk_mask = (1 << gpio.ucClkY_Shift); i2c.get_data_mask = (1 << gpio.ucDataY_Shift); + i2c.a_clk_mask = (1 << gpio.ucClkA_Shift); + i2c.a_data_mask = (1 << gpio.ucDataA_Shift); i2c.valid = true; return i2c; @@ -94,7 +98,7 @@ static void radeon_atom_apply_quirks(struct drm_device *dev, int index) mode_info->bios_connector[index].ddc_i2c.valid = false; } } -} +} bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device *dev) { @@ -146,7 +150,7 @@ bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device } mode_info->bios_connector[i].dac_type = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC; - + if ((i == ATOM_DEVICE_TV1_INDEX) || (i == ATOM_DEVICE_TV2_INDEX) || (i == ATOM_DEVICE_TV1_INDEX)) @@ -161,7 +165,7 @@ bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device mode_info->bios_connector[i].ddc_i2c = radeon_lookup_gpio_for_ddc(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux); } else - mode_info->bios_connector[i].ddc_i2c = + mode_info->bios_connector[i].ddc_i2c = radeon_lookup_gpio_for_ddc(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux); if (i == ATOM_DEVICE_DFP1_INDEX) @@ -243,7 +247,7 @@ bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device } } - + DRM_DEBUG("BIOS Connector table\n"); for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { if (!mode_info->bios_connector[i].valid) @@ -265,7 +269,7 @@ union firmware_info { ATOM_FIRMWARE_INFO_V1_3 info_13; ATOM_FIRMWARE_INFO_V1_4 info_14; }; - + bool radeon_atom_get_clock_info(struct 
drm_device *dev) { struct drm_radeon_private *dev_priv = dev->dev_private; @@ -284,8 +288,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) pll->reference_div = 0; pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); - pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - + pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); + if (pll->pll_out_min == 0) { if (radeon_is_avivo(dev_priv)) pll->pll_out_min = 64800; @@ -298,7 +302,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) pll->xclk = le16_to_cpu(firmware_info->info.usMaxPixelClock); - return true; + return true; } union lvds_info { @@ -330,7 +334,7 @@ void radeon_get_lvds_info(struct radeon_encoder *encoder) encoder->vblank = le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); encoder->hoverplus = le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); encoder->hsync_width = le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); - encoder->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); + encoder->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); } void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable) @@ -342,7 +346,7 @@ void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable) int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); args.ucEnable = enable; - + atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); } @@ -355,7 +359,7 @@ void radeon_atom_static_pwrmgt_setup(struct drm_device *dev, int enable) int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); args.ucEnable = enable; - + atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); } diff --git a/linux-core/radeon_buffer.c b/linux-core/radeon_buffer.c index 227a2fa0..900d450a 100644 --- a/linux-core/radeon_buffer.c +++ b/linux-core/radeon_buffer.c @@ -55,10 +55,14 @@ int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags) drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; + if (!dev_priv->cp_running) + return 0; + BEGIN_RING(4); RADEON_FLUSH_CACHE(); RADEON_FLUSH_ZCACHE(); ADVANCE_RING(); + COMMIT_RING(); return 0; } @@ -261,6 +265,6 @@ uint64_t radeon_evict_flags(struct drm_buffer_object *bo) case DRM_BO_MEM_TT: return DRM_BO_FLAG_MEM_LOCAL; default: - return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + return DRM_BO_FLAG_MEM_TT; } } diff --git a/linux-core/radeon_combios.c b/linux-core/radeon_combios.c index e2b768ca..56116463 100644 --- a/linux-core/radeon_combios.c +++ b/linux-core/radeon_combios.c @@ -30,6 +30,57 @@ /* old legacy ATI BIOS routines */ +/* COMBIOS table offsets */ +enum radeon_combios_table_offset +{ + /* absolute offset tables */ + COMBIOS_ASIC_INIT_1_TABLE, + COMBIOS_BIOS_SUPPORT_TABLE, + COMBIOS_DAC_PROGRAMMING_TABLE, + COMBIOS_MAX_COLOR_DEPTH_TABLE, + COMBIOS_CRTC_INFO_TABLE, + COMBIOS_PLL_INFO_TABLE, + COMBIOS_TV_INFO_TABLE, + COMBIOS_DFP_INFO_TABLE, + COMBIOS_HW_CONFIG_INFO_TABLE, + COMBIOS_MULTIMEDIA_INFO_TABLE, + COMBIOS_TV_STD_PATCH_TABLE, + COMBIOS_LCD_INFO_TABLE, + COMBIOS_MOBILE_INFO_TABLE, + COMBIOS_PLL_INIT_TABLE, + COMBIOS_MEM_CONFIG_TABLE, + COMBIOS_SAVE_MASK_TABLE, + COMBIOS_HARDCODED_EDID_TABLE, + COMBIOS_ASIC_INIT_2_TABLE, + COMBIOS_CONNECTOR_INFO_TABLE, + COMBIOS_DYN_CLK_1_TABLE, + COMBIOS_RESERVED_MEM_TABLE, + COMBIOS_EXT_TDMS_INFO_TABLE, + COMBIOS_MEM_CLK_INFO_TABLE, + COMBIOS_EXT_DAC_INFO_TABLE, + COMBIOS_MISC_INFO_TABLE, + COMBIOS_CRT_INFO_TABLE, + 
COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE, + COMBIOS_COMPONENT_VIDEO_INFO_TABLE, + COMBIOS_FAN_SPEED_INFO_TABLE, + COMBIOS_OVERDRIVE_INFO_TABLE, + COMBIOS_OEM_INFO_TABLE, + COMBIOS_DYN_CLK_2_TABLE, + COMBIOS_POWER_CONNECTOR_INFO_TABLE, + COMBIOS_I2C_INFO_TABLE, + /* relative offset tables */ + COMBIOS_ASIC_INIT_3_TABLE, /* offset from misc info */ + COMBIOS_ASIC_INIT_4_TABLE, /* offset from misc info */ + COMBIOS_ASIC_INIT_5_TABLE, /* offset from misc info */ + COMBIOS_RAM_RESET_TABLE, /* offset from mem config */ + COMBIOS_POWERPLAY_TABLE, /* offset from mobile info */ + COMBIOS_GPIO_INFO_TABLE, /* offset from mobile info */ + COMBIOS_LCD_DDC_INFO_TABLE, /* offset from mobile info */ + COMBIOS_TMDS_POWER_TABLE, /* offset from mobile info */ + COMBIOS_TMDS_POWER_ON_TABLE, /* offset from tmds power */ + COMBIOS_TMDS_POWER_OFF_TABLE, /* offset from tmds power */ +}; + enum radeon_combios_ddc { DDC_NONE_DETECTED, @@ -53,12 +104,291 @@ enum radeon_combios_connector CONNECTOR_UNSUPPORTED_LEGACY }; +static uint16_t combios_get_table_offset(struct drm_device *dev, enum radeon_combios_table_offset table) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + int rev; + uint16_t offset = 0, check_offset; + + switch (table) { + /* absolute offset tables */ + case COMBIOS_ASIC_INIT_1_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0xc); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_BIOS_SUPPORT_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x14); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_DAC_PROGRAMMING_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x2a); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_MAX_COLOR_DEPTH_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x2c); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_CRTC_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x2e); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_PLL_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x30); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_TV_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x32); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_DFP_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x34); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_HW_CONFIG_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x36); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_MULTIMEDIA_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x38); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_TV_STD_PATCH_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x3e); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_LCD_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x40); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_MOBILE_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x42); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_PLL_INIT_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x46); + if 
(check_offset) + offset = check_offset; + break; + case COMBIOS_MEM_CONFIG_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x48); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_SAVE_MASK_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x4a); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_HARDCODED_EDID_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x4c); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_ASIC_INIT_2_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x4e); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_CONNECTOR_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x50); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_DYN_CLK_1_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x52); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_RESERVED_MEM_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x54); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_EXT_TDMS_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x58); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_MEM_CLK_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x5a); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_EXT_DAC_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x5c); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_MISC_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x5e); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_CRT_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x60); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x62); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x64); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_FAN_SPEED_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x66); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_OVERDRIVE_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x68); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_OEM_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x6a); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_DYN_CLK_2_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x6c); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_POWER_CONNECTOR_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x6e); + if (check_offset) + offset = check_offset; + break; + case COMBIOS_I2C_INFO_TABLE: + check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x70); + if (check_offset) + offset = check_offset; + break; + /* relative offset tables */ + case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ + check_offset = combios_get_table_offset(dev, 
COMBIOS_MISC_INFO_TABLE); + if (check_offset) { + rev = radeon_bios8(dev_priv, check_offset); + if (rev > 0) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x3); + if (check_offset) + offset = check_offset; + } + } + break; + case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */ + check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); + if (check_offset) { + rev = radeon_bios8(dev_priv, check_offset); + if (rev > 0) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x5); + if (check_offset) + offset = check_offset; + } + } + break; + case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */ + check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); + if (check_offset) { + rev = radeon_bios8(dev_priv, check_offset); + if (rev == 2) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x9); + if (check_offset) + offset = check_offset; + } + } + break; + case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */ + check_offset = combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE); + if (check_offset) { + while (radeon_bios8(dev_priv, check_offset++)); + check_offset += 2; + if (check_offset) + offset = check_offset; + } + break; + case COMBIOS_POWERPLAY_TABLE: /* offset from mobile info */ + check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); + if (check_offset) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x11); + if (check_offset) + offset = check_offset; + } + break; + case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */ + check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); + if (check_offset) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x13); + if (check_offset) + offset = check_offset; + } + break; + case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */ + check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); + if (check_offset) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x15); + if (check_offset) + offset = check_offset; + } + break; + case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */ + check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); + if (check_offset) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x17); + if (check_offset) + offset = check_offset; + } + break; + case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */ + check_offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE); + if (check_offset) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x2); + if (check_offset) + offset = check_offset; + } + break; + case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */ + check_offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE); + if (check_offset) { + check_offset = radeon_bios16(dev_priv, check_offset + 0x4); + if (check_offset) + offset = check_offset; + } + break; + default: + break; + } + + return offset; + +} + struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) { struct radeon_i2c_bus_rec i2c; - i2c.mask_clk_mask = RADEON_GPIO_EN_1 | RADEON_GPIO_Y_1; - i2c.mask_data_mask = RADEON_GPIO_EN_0 | RADEON_GPIO_Y_0; + i2c.mask_clk_mask = RADEON_GPIO_EN_1; + i2c.mask_data_mask = RADEON_GPIO_EN_0; + i2c.a_clk_mask = RADEON_GPIO_A_1; + i2c.a_data_mask = RADEON_GPIO_A_0; i2c.put_clk_mask = RADEON_GPIO_EN_1; i2c.put_data_mask = RADEON_GPIO_EN_0; i2c.get_clk_mask = RADEON_GPIO_Y_1; @@ -67,6 +397,8 @@ struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) (ddc_line == RADEON_MDGPIO_EN_REG)) { 
i2c.mask_clk_reg = ddc_line; i2c.mask_data_reg = ddc_line; + i2c.a_clk_reg = ddc_line; + i2c.a_data_reg = ddc_line; i2c.put_clk_reg = ddc_line; i2c.put_data_reg = ddc_line; i2c.get_clk_reg = ddc_line + 4; @@ -74,17 +406,19 @@ struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) } else { i2c.mask_clk_reg = ddc_line; i2c.mask_data_reg = ddc_line; + i2c.a_clk_reg = ddc_line; + i2c.a_data_reg = ddc_line; i2c.put_clk_reg = ddc_line; i2c.put_data_reg = ddc_line; i2c.get_clk_reg = ddc_line; i2c.get_data_reg = ddc_line; } - + if (ddc_line) i2c.valid = true; else i2c.valid = false; - + return i2c; } @@ -92,87 +426,315 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) { struct drm_radeon_private *dev_priv = dev->dev_private; struct radeon_mode_info *mode_info = &dev_priv->mode_info; - uint16_t pll_info_block; + uint16_t pll_info; struct radeon_pll *pll = &mode_info->pll; - int rev; + int8_t rev; - pll_info_block = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x30); - rev = radeon_bios8(dev_priv, pll_info_block); + pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); + if (pll_info) { + rev = radeon_bios8(dev_priv, pll_info); - pll->reference_freq = radeon_bios16(dev_priv, pll_info_block + 0xe); - pll->reference_div = radeon_bios16(dev_priv, pll_info_block + 0x10); - pll->pll_out_min = radeon_bios32(dev_priv, pll_info_block + 0x12); - pll->pll_out_max = radeon_bios32(dev_priv, pll_info_block + 0x16); + pll->reference_freq = radeon_bios16(dev_priv, pll_info + 0xe); + pll->reference_div = radeon_bios16(dev_priv, pll_info + 0x10); + pll->pll_out_min = radeon_bios32(dev_priv, pll_info + 0x12); + pll->pll_out_max = radeon_bios32(dev_priv, pll_info + 0x16); - if (rev > 9) { - pll->pll_in_min = radeon_bios32(dev_priv, pll_info_block + 0x36); - pll->pll_in_max = radeon_bios32(dev_priv, pll_info_block + 0x3a); - } else { - pll->pll_in_min = 40; - pll->pll_in_max = 500; + if (rev > 9) { + pll->pll_in_min = radeon_bios32(dev_priv, pll_info + 0x36); + pll->pll_in_max = radeon_bios32(dev_priv, pll_info + 0x3a); + } else { + pll->pll_in_min = 40; + pll->pll_in_max = 500; + } + + pll->xclk = radeon_bios16(dev_priv, pll_info + 0x08); + + // sclk/mclk use fixed point + //sclk = radeon_bios16(pll_info + 8) / 100.0; + //mclk = radeon_bios16(pll_info + 10) / 100.0; + //if (sclk == 0) sclk = 200; + //if (mclk == 0) mclk = 200; + + return true; } + return false; +} - pll->xclk = radeon_bios16(dev_priv, pll_info_block + 0x08); +bool radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint16_t dac_info; + uint8_t rev, bg, dac; - // sclk/mclk use fixed point - - return true; + /* first check TV table */ + dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); + if (dac_info) { + rev = radeon_bios8(dev_priv, dac_info + 0x3); + if (rev > 4) { + bg = radeon_bios8(dev_priv, dac_info + 0xc) & 0xf; + dac = radeon_bios8(dev_priv, dac_info + 0xd) & 0xf; + encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); + + bg = radeon_bios8(dev_priv, dac_info + 0xe) & 0xf; + dac = radeon_bios8(dev_priv, dac_info + 0xf) & 0xf; + encoder->pal_tvdac_adj = (bg << 16) | (dac << 20); + + bg = radeon_bios8(dev_priv, dac_info + 0x10) & 0xf; + dac = radeon_bios8(dev_priv, dac_info + 0x11) & 0xf; + encoder->ntsc_tvdac_adj = (bg << 16) | (dac << 20); + + return true; + } else if (rev > 1) { + bg = radeon_bios8(dev_priv, dac_info + 0xc) & 0xf; + dac = (radeon_bios8(dev_priv, dac_info + 
0xc) >> 4) & 0xf; + encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); + + bg = radeon_bios8(dev_priv, dac_info + 0xd) & 0xf; + dac = (radeon_bios8(dev_priv, dac_info + 0xd) >> 4) & 0xf; + encoder->pal_tvdac_adj = (bg << 16) | (dac << 20); + + bg = radeon_bios8(dev_priv, dac_info + 0xe) & 0xf; + dac = (radeon_bios8(dev_priv, dac_info + 0xe) >> 4) & 0xf; + encoder->ntsc_tvdac_adj = (bg << 16) | (dac << 20); + + return true; + } + } + + /* then check CRT table */ + dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); + if (dac_info) { + rev = radeon_bios8(dev_priv, dac_info) & 0x3; + if (rev < 2) { + bg = radeon_bios8(dev_priv, dac_info + 0x3) & 0xf; + dac = (radeon_bios8(dev_priv, dac_info + 0x3) >> 4) & 0xf; + encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); + encoder->pal_tvdac_adj = encoder->ps2_tvdac_adj; + encoder->ntsc_tvdac_adj = encoder->ps2_tvdac_adj; + + return true; + } else { + bg = radeon_bios8(dev_priv, dac_info + 0x4) & 0xf; + dac = radeon_bios8(dev_priv, dac_info + 0x5) & 0xf; + encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); + encoder->pal_tvdac_adj = encoder->ps2_tvdac_adj; + encoder->ntsc_tvdac_adj = encoder->ps2_tvdac_adj; + + return true; + } + + } + return false; +} + +bool radeon_combios_get_tv_info(struct radeon_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint16_t tv_info; + + tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); + if (tv_info) { + if (radeon_bios8(dev_priv, tv_info + 6) == 'T') { + switch (radeon_bios8(dev_priv, tv_info + 7) & 0xf) { + case 1: + encoder->tv_std = TV_STD_NTSC; + DRM_INFO("Default TV standard: NTSC\n"); + break; + case 2: + encoder->tv_std = TV_STD_PAL; + DRM_INFO("Default TV standard: PAL\n"); + break; + case 3: + encoder->tv_std = TV_STD_PAL_M; + DRM_INFO("Default TV standard: PAL-M\n"); + break; + case 4: + encoder->tv_std = TV_STD_PAL_60; + DRM_INFO("Default TV standard: PAL-60\n"); + break; + case 5: + encoder->tv_std = TV_STD_NTSC_J; + DRM_INFO("Default TV standard: NTSC-J\n"); + break; + case 6: + encoder->tv_std = TV_STD_SCART_PAL; + DRM_INFO("Default TV standard: SCART-PAL\n"); + break; + default: + encoder->tv_std = TV_STD_NTSC; + DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); + break; + } + + switch ((radeon_bios8(dev_priv, tv_info + 9) >> 2) & 0x3) { + case 0: + DRM_INFO("29.498928713 MHz TV ref clk\n"); + break; + case 1: + DRM_INFO("28.636360000 MHz TV ref clk\n"); + break; + case 2: + DRM_INFO("14.318180000 MHz TV ref clk\n"); + break; + case 3: + DRM_INFO("27.000000000 MHz TV ref clk\n"); + break; + default: + break; + } + return true; + } + } + return false; } bool radeon_combios_get_lvds_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct drm_radeon_private *dev_priv = dev->dev_private; - uint16_t tmp; + uint16_t lcd_info; + uint32_t panel_setup; char stmp[30]; - int tmp0; - int i; + int tmp, i; - tmp = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x40); - if (!tmp) { - DRM_INFO("No panel info found in BIOS\n"); - return false; + lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); - } + if (lcd_info) { + for (i = 0; i < 24; i++) + stmp[i] = radeon_bios8(dev_priv, lcd_info + i + 1); + stmp[24] = 0; + + DRM_INFO("Panel ID String: %s\n", stmp); - for (i = 0; i < 24; i++) - stmp[i] = radeon_bios8(dev_priv, tmp + i + 1); - stmp[24] = 0; + encoder->panel_xres = radeon_bios16(dev_priv, lcd_info + 25); + encoder->panel_yres = 
radeon_bios16(dev_priv, lcd_info + 27); - DRM_INFO("Panel ID String: %s\n", stmp); + DRM_INFO("Panel Size %dx%d\n", encoder->panel_xres, encoder->panel_yres); - encoder->panel_xres = radeon_bios16(dev_priv, tmp + 25); - encoder->panel_yres = radeon_bios16(dev_priv, tmp + 27); + encoder->panel_vcc_delay = radeon_bios16(dev_priv, lcd_info + 44); + if (encoder->panel_vcc_delay > 2000 || encoder->panel_vcc_delay < 0) + encoder->panel_vcc_delay = 2000; - DRM_INFO("Panel Size %dx%d\n", encoder->panel_xres, encoder->panel_yres); + encoder->panel_pwr_delay = radeon_bios16(dev_priv, lcd_info + 0x24); + encoder->panel_digon_delay = radeon_bios16(dev_priv, lcd_info + 0x38) & 0xf; + encoder->panel_blon_delay = (radeon_bios16(dev_priv, lcd_info + 0x38) >> 4) & 0xf; - encoder->panel_pwr_delay = radeon_bios16(dev_priv, tmp + 44); - if (encoder->panel_pwr_delay > 2000 || encoder->panel_pwr_delay < 0) - encoder->panel_pwr_delay = 2000; + encoder->panel_ref_divider = radeon_bios16(dev_priv, lcd_info + 46); + encoder->panel_post_divider = radeon_bios8(dev_priv, lcd_info + 48); + encoder->panel_fb_divider = radeon_bios16(dev_priv, lcd_info + 49); + if ((encoder->panel_ref_divider != 0) && + (encoder->panel_fb_divider > 3)) + encoder->use_bios_dividers = true; - for (i = 0; i < 32; i++) { - tmp0 = radeon_bios16(dev_priv, tmp + 64 + i * 2); - if (tmp0 == 0) break; + panel_setup = radeon_bios32(dev_priv, lcd_info + 0x39); + encoder->lvds_gen_cntl = 0; + if (panel_setup & 0x1) + encoder->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT; - if ((radeon_bios16(dev_priv, tmp0) == encoder->panel_xres) && - (radeon_bios16(dev_priv, tmp0 + 2) == encoder->panel_yres)) { - encoder->hblank = (radeon_bios16(dev_priv, tmp0 + 17) - - radeon_bios16(dev_priv, tmp0 + 19)) * 8; - encoder->hoverplus = (radeon_bios16(dev_priv, tmp0 + 21) - - radeon_bios16(dev_priv, tmp0 + 19) - 1) * 8; - encoder->hsync_width = radeon_bios8(dev_priv, tmp0 + 23) * 8; + if ((panel_setup >> 4) & 0x1) + encoder->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE; - encoder->vblank = (radeon_bios16(dev_priv, tmp0 + 24) - - radeon_bios16(dev_priv, tmp0 + 26)); - encoder->voverplus = ((radeon_bios16(dev_priv, tmp0 + 28) & 0x7fff) - - radeon_bios16(dev_priv, tmp0 + 26)); - encoder->vsync_width = ((radeon_bios16(dev_priv, tmp0 + 28) & 0xf800) >> 11); - encoder->dotclock = radeon_bios16(dev_priv, tmp0 + 9) * 10; - encoder->flags = 0; + switch ((panel_setup >> 8) & 0x8) { + case 0: + encoder->lvds_gen_cntl |= RADEON_LVDS_NO_FM; + break; + case 1: + encoder->lvds_gen_cntl |= RADEON_LVDS_2_GREY; + break; + case 2: + encoder->lvds_gen_cntl |= RADEON_LVDS_4_GREY; + break; + default: + break; } + + if ((panel_setup >> 16) & 0x1) + encoder->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW; + + if ((panel_setup >> 17) & 0x1) + encoder->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW; + + if ((panel_setup >> 18) & 0x1) + encoder->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW; + + if ((panel_setup >> 23) & 0x1) + encoder->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL; + + encoder->lvds_gen_cntl |= (panel_setup & 0xf0000000); + + + for (i = 0; i < 32; i++) { + tmp = radeon_bios16(dev_priv, lcd_info + 64 + i * 2); + if (tmp == 0) break; + + if ((radeon_bios16(dev_priv, tmp) == encoder->panel_xres) && + (radeon_bios16(dev_priv, tmp + 2) == encoder->panel_yres)) { + encoder->hblank = (radeon_bios16(dev_priv, tmp + 17) - + radeon_bios16(dev_priv, tmp + 19)) * 8; + encoder->hoverplus = (radeon_bios16(dev_priv, tmp + 21) - + radeon_bios16(dev_priv, tmp + 19) - 1) * 8; + encoder->hsync_width = radeon_bios8(dev_priv, tmp + 
23) * 8; + + encoder->vblank = (radeon_bios16(dev_priv, tmp + 24) - + radeon_bios16(dev_priv, tmp + 26)); + encoder->voverplus = ((radeon_bios16(dev_priv, tmp + 28) & 0x7fff) - + radeon_bios16(dev_priv, tmp + 26)); + encoder->vsync_width = ((radeon_bios16(dev_priv, tmp + 28) & 0xf800) >> 11); + encoder->dotclock = radeon_bios16(dev_priv, tmp + 9) * 10; + encoder->flags = 0; + } + } + return true; } - return true; + DRM_INFO("No panel info found in BIOS\n"); + return false; + +} + +bool radeon_combios_get_tmds_info(struct radeon_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint16_t tmds_info; + int i, n; + uint8_t ver; + + tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); + + if (tmds_info) { + ver = radeon_bios8(dev_priv, tmds_info); + DRM_INFO("DFP table revision: %d\n", ver); + if (ver == 3) { + n = radeon_bios8(dev_priv, tmds_info + 5) + 1; + if (n > 4) + n = 4; + for (i = 0; i < n; i++) { + encoder->tmds_pll[i].value = radeon_bios32(dev_priv, tmds_info + i * 10 + 0x08); + encoder->tmds_pll[i].freq = radeon_bios16(dev_priv, tmds_info + i * 10 + 0x10); + } + return true; + } else if (ver == 4) { + int stride = 0; + n = radeon_bios8(dev_priv, tmds_info + 5) + 1; + if (n > 4) + n = 4; + for (i = 0; i < n; i++) { + encoder->tmds_pll[i].value = radeon_bios32(dev_priv, tmds_info + stride + 0x08); + encoder->tmds_pll[i].freq = radeon_bios16(dev_priv, tmds_info + stride + 0x10); + if (i == 0) + stride += 10; + else + stride += 6; + } + return true; + } + } + + DRM_INFO("No TMDS info found in BIOS\n"); + return false; } static void radeon_apply_legacy_quirks(struct drm_device *dev, int bios_index) @@ -180,27 +742,31 @@ static void radeon_apply_legacy_quirks(struct drm_device *dev, int bios_index) struct drm_radeon_private *dev_priv = dev->dev_private; struct radeon_mode_info *mode_info = &dev_priv->mode_info; - /* on XPRESS chips, CRT2_DDC and MONID_DCC both use the - * MONID gpio, but use different pins. - * CRT2_DDC uses the standard pinout, MONID_DDC uses - * something else. - */ + /* XPRESS DDC quirks */ if ((dev_priv->chip_family == CHIP_RS400 || dev_priv->chip_family == CHIP_RS480) && - mode_info->bios_connector[bios_index].connector_type == CONNECTOR_VGA && mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg == RADEON_GPIO_CRT2_DDC) { mode_info->bios_connector[bios_index].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); - } - - /* XPRESS desktop chips seem to have a proprietary connector listed for - * DVI-D, try and do the right thing here. - */ - if ((dev_priv->flags & RADEON_IS_MOBILITY) && - (mode_info->bios_connector[bios_index].connector_type == CONNECTOR_LVDS)) { - DRM_INFO("proprietary connector found. 
assuming DVI-D\n"); - mode_info->bios_connector[bios_index].dac_type = DAC_NONE; - mode_info->bios_connector[bios_index].tmds_type = TMDS_EXT; - mode_info->bios_connector[bios_index].connector_type = CONNECTOR_DVI_D; + } else if ((dev_priv->chip_family == CHIP_RS400 || + dev_priv->chip_family == CHIP_RS480) && + mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg == RADEON_GPIO_MONID) { + mode_info->bios_connector[bios_index].ddc_i2c.valid = true; + mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_mask = (0x20 << 8); + mode_info->bios_connector[bios_index].ddc_i2c.mask_data_mask = 0x80; + mode_info->bios_connector[bios_index].ddc_i2c.a_clk_mask = (0x20 << 8); + mode_info->bios_connector[bios_index].ddc_i2c.a_data_mask = 0x80; + mode_info->bios_connector[bios_index].ddc_i2c.put_clk_mask = (0x20 << 8); + mode_info->bios_connector[bios_index].ddc_i2c.put_data_mask = 0x80; + mode_info->bios_connector[bios_index].ddc_i2c.get_clk_mask = (0x20 << 8); + mode_info->bios_connector[bios_index].ddc_i2c.get_data_mask = 0x80; + mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg = RADEON_GPIOPAD_MASK; + mode_info->bios_connector[bios_index].ddc_i2c.mask_data_reg = RADEON_GPIOPAD_MASK; + mode_info->bios_connector[bios_index].ddc_i2c.a_clk_reg = RADEON_GPIOPAD_A; + mode_info->bios_connector[bios_index].ddc_i2c.a_data_reg = RADEON_GPIOPAD_A; + mode_info->bios_connector[bios_index].ddc_i2c.put_clk_reg = RADEON_GPIOPAD_EN; + mode_info->bios_connector[bios_index].ddc_i2c.put_data_reg = RADEON_GPIOPAD_EN; + mode_info->bios_connector[bios_index].ddc_i2c.get_clk_reg = RADEON_LCD_GPIO_Y_REG; + mode_info->bios_connector[bios_index].ddc_i2c.get_data_reg = RADEON_LCD_GPIO_Y_REG; } /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, @@ -222,27 +788,36 @@ static void radeon_apply_legacy_quirks(struct drm_device *dev, int bios_index) } + /* X300 card with extra non-existent DVI port */ + if (dev->pdev->device == 0x5B60 && + dev->pdev->subsystem_vendor == 0x17af && + dev->pdev->subsystem_device == 0x201e && + bios_index == 2) { + if (mode_info->bios_connector[bios_index].connector_type == CONNECTOR_DVI_I) + mode_info->bios_connector[bios_index].valid = false; + } + } bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) { struct drm_radeon_private *dev_priv = dev->dev_private; struct radeon_mode_info *mode_info = &dev_priv->mode_info; - uint32_t offset, entry; - uint16_t tmp0, tmp1, tmp; - enum radeon_combios_ddc ddctype; + uint32_t conn_info, entry; + uint16_t tmp; + enum radeon_combios_ddc ddc_type; enum radeon_combios_connector connector_type; int i; - + DRM_DEBUG("\n"); - offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x50); - if (offset) { + conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); + if (conn_info) { for (i = 0; i < 4; i++) { - entry = offset + 2 + i * 2; - + entry = conn_info + 2 + i * 2; + if (!radeon_bios16(dev_priv, entry)) break; - + mode_info->bios_connector[i].valid = true; tmp = radeon_bios16(dev_priv, entry); @@ -252,7 +827,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) switch(connector_type) { case CONNECTOR_PROPRIETARY_LEGACY: - mode_info->bios_connector[i].connector_type = CONNECTOR_LVDS; + mode_info->bios_connector[i].connector_type = CONNECTOR_DVI_D; break; case CONNECTOR_CRT_LEGACY: mode_info->bios_connector[i].connector_type = CONNECTOR_VGA; @@ -277,8 +852,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) mode_info->bios_connector[i].ddc_i2c.valid = 
false; - ddctype = (tmp >> 8) & 0xf; - switch (ddctype) { + ddc_type = (tmp >> 8) & 0xf; + switch (ddc_type) { case DDC_MONID: mode_info->bios_connector[i].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); break; @@ -313,9 +888,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) radeon_apply_legacy_quirks(dev, i); } } else { - DRM_INFO("no connector table found in BIOS\n"); - offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x34); - if (offset) { + uint16_t tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); + if (tmds_info) { DRM_DEBUG("Found DFP table, assuming DVI connector\n"); mode_info->bios_connector[0].valid = true; @@ -324,50 +898,113 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) mode_info->bios_connector[0].tmds_type = TMDS_INT; mode_info->bios_connector[0].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); } else { - DRM_DEBUG("No table found\n"); + DRM_DEBUG("No connector info found\n"); return false; } } - if (dev_priv->flags & RADEON_IS_MOBILITY) { - offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x40); - if (offset) { + if (dev_priv->flags & RADEON_IS_MOBILITY || + dev_priv->chip_family == CHIP_RS400 || + dev_priv->chip_family == CHIP_RS480) { + uint16_t lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); + if (lcd_info) { + uint16_t lcd_ddc_info = lcd_ddc_info = combios_get_table_offset(dev, COMBIOS_LCD_DDC_INFO_TABLE); + mode_info->bios_connector[4].valid = true; mode_info->bios_connector[4].connector_type = CONNECTOR_LVDS; mode_info->bios_connector[4].dac_type = DAC_NONE; mode_info->bios_connector[4].tmds_type = TMDS_NONE; mode_info->bios_connector[4].ddc_i2c.valid = false; - tmp = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x42); - if (tmp) { - tmp0 = radeon_bios16(dev_priv, tmp + 0x15); - if (tmp0) { - tmp1 = radeon_bios8(dev_priv, tmp0 + 2) & 0x07; - if (tmp1) { - ddctype = tmp1; - - switch(ddctype) { - case DDC_MONID: - case DDC_DVI: - case DDC_CRT2: - case DDC_LCD: - case DDC_GPIO: - default: - break; - } - DRM_DEBUG("LCD DDC Info Table found!\n"); - } + if (lcd_ddc_info) { + ddc_type = radeon_bios8(dev_priv, lcd_ddc_info + 2); + switch(ddc_type) { + case DDC_MONID: + mode_info->bios_connector[4].ddc_i2c = + combios_setup_i2c_bus(RADEON_GPIO_MONID); + break; + case DDC_DVI: + mode_info->bios_connector[4].ddc_i2c = + combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + break; + case DDC_VGA: + mode_info->bios_connector[4].ddc_i2c = + combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + break; + case DDC_CRT2: + mode_info->bios_connector[4].ddc_i2c = + combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); + break; + case DDC_LCD: + mode_info->bios_connector[4].ddc_i2c = + combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); + mode_info->bios_connector[4].ddc_i2c.mask_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.mask_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + mode_info->bios_connector[4].ddc_i2c.a_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.a_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + mode_info->bios_connector[4].ddc_i2c.put_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.put_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + mode_info->bios_connector[4].ddc_i2c.get_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.get_data_mask = + 
radeon_bios32(dev_priv, lcd_ddc_info + 7); + break; + case DDC_GPIO: + mode_info->bios_connector[4].ddc_i2c = + combios_setup_i2c_bus(RADEON_MDGPIO_EN_REG); + mode_info->bios_connector[4].ddc_i2c.mask_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.mask_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + mode_info->bios_connector[4].ddc_i2c.a_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.a_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + mode_info->bios_connector[4].ddc_i2c.put_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.put_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + mode_info->bios_connector[4].ddc_i2c.get_clk_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 3); + mode_info->bios_connector[4].ddc_i2c.get_data_mask = + radeon_bios32(dev_priv, lcd_ddc_info + 7); + break; + default: + break; } - } else - mode_info->bios_connector[4].ddc_i2c.valid = false; + DRM_DEBUG("LCD DDC Info Table found!\n"); + } + } else + mode_info->bios_connector[4].ddc_i2c.valid = false; + } + + /* check TV table */ + if (dev_priv->chip_family != CHIP_R100 && + dev_priv->chip_family != CHIP_R200) { + uint32_t tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); + if (tv_info) { + if (radeon_bios8(dev_priv, tv_info + 6) == 'T') { + mode_info->bios_connector[5].valid = true; + mode_info->bios_connector[5].connector_type = CONNECTOR_DIN; + mode_info->bios_connector[5].dac_type = DAC_TVDAC; + mode_info->bios_connector[5].tmds_type = TMDS_NONE; + mode_info->bios_connector[5].ddc_i2c.valid = false; + } } } + DRM_DEBUG("BIOS Connector table\n"); for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { if (!mode_info->bios_connector[i].valid) continue; - + DRM_DEBUG("Port %d: ddc_type 0x%x, dac_type %d, tmds_type %d, connector type %d, hpd_mask %d\n", i, mode_info->bios_connector[i].ddc_i2c.mask_clk_reg, mode_info->bios_connector[i].dac_type, @@ -378,4 +1015,259 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) return true; } - + +static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + + if (offset) { + while (radeon_bios16(dev_priv, offset)) { + uint16_t cmd = ((radeon_bios16(dev_priv, offset) & 0xe000) >> 13); + uint32_t addr = (radeon_bios16(dev_priv, offset) & 0x1fff); + uint32_t val, and_mask, or_mask; + uint32_t tmp; + + offset += 2; + switch (cmd) { + case 0: + val = radeon_bios32(dev_priv, offset); + offset += 4; + RADEON_WRITE(addr, val); + break; + case 1: + val = radeon_bios32(dev_priv, offset); + offset += 4; + RADEON_WRITE(addr, val); + break; + case 2: + and_mask = radeon_bios32(dev_priv, offset); + offset += 4; + or_mask = radeon_bios32(dev_priv, offset); + offset += 4; + tmp = RADEON_READ(addr); + tmp &= and_mask; + tmp |= or_mask; + RADEON_WRITE(addr, tmp); + break; + case 3: + and_mask = radeon_bios32(dev_priv, offset); + offset += 4; + or_mask = radeon_bios32(dev_priv, offset); + offset += 4; + tmp = RADEON_READ(addr); + tmp &= and_mask; + tmp |= or_mask; + RADEON_WRITE(addr, tmp); + break; + case 4: + val = radeon_bios16(dev_priv, offset); + offset += 2; + udelay(val); + break; + case 5: + val = radeon_bios16(dev_priv, offset); + offset += 2; + switch (addr) { + case 8: + while (val--) { + if (!(RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL) & + RADEON_MC_BUSY)) + break; + } + break; + case 9: + while (val--) { + 
if ((RADEON_READ(RADEON_MC_STATUS) & + RADEON_MC_IDLE)) + break; + } + break; + default: + break; + } + break; + default: + break; + } + } + } +} + +static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + + if (offset) { + while (radeon_bios8(dev_priv, offset)) { + uint8_t cmd = ((radeon_bios8(dev_priv, offset) & 0xc0) >> 6); + uint8_t addr = (radeon_bios8(dev_priv, offset) & 0x3f); + uint32_t val, shift, tmp; + uint32_t and_mask, or_mask; + + offset++; + switch (cmd) { + case 0: + val = radeon_bios32(dev_priv, offset); + offset += 4; + RADEON_WRITE_PLL(dev_priv, addr, val); + break; + case 1: + shift = radeon_bios8(dev_priv, offset) * 8; + offset++; + and_mask = radeon_bios8(dev_priv, offset) << shift; + and_mask |= ~(0xff << shift); + offset++; + or_mask = radeon_bios8(dev_priv, offset) << shift; + offset++; + tmp = RADEON_READ_PLL(dev_priv, addr); + tmp &= and_mask; + tmp |= or_mask; + RADEON_WRITE_PLL(dev_priv, addr, tmp); + break; + case 2: + case 3: + tmp = 1000; + switch (addr) { + case 1: + udelay(150); + break; + case 2: + udelay(1000); + break; + case 3: + while (tmp--) { + if (!(RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL) & + RADEON_MC_BUSY)) + break; + } + break; + case 4: + while (tmp--) { + if (RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL) & + RADEON_DLL_READY) + break; + } + break; + case 5: + tmp = RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL); + if (tmp & RADEON_CG_NO1_DEBUG_0) { +#if 0 + uint32_t mclk_cntl = RADEON_READ_PLL(RADEON_MCLK_CNTL); + mclk_cntl &= 0xffff0000; + //mclk_cntl |= 0x00001111; /* ??? */ + RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_CNTL, mclk_cntl); + udelay(10000); +#endif + RADEON_WRITE_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL, + tmp & ~RADEON_CG_NO1_DEBUG_0); + udelay(10000); + } + break; + default: + break; + } + break; + default: + break; + } + } + } +} + +static void combios_parse_ram_reset_table(struct drm_device *dev, uint16_t offset) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + uint32_t tmp; + + if (offset) { + uint8_t val = radeon_bios8(dev_priv, offset); + while (val != 0xff) { + offset++; + + if (val == 0x0f) { + uint32_t channel_complete_mask; + + if (radeon_is_r300(dev_priv)) + channel_complete_mask = R300_MEM_PWRUP_COMPLETE; + else + channel_complete_mask = RADEON_MEM_PWRUP_COMPLETE; + tmp = 20000; + while (tmp--) { + if ((RADEON_READ(RADEON_MEM_STR_CNTL) & + channel_complete_mask) == + channel_complete_mask) + break; + } + } else { + uint32_t or_mask = radeon_bios16(dev_priv, offset); + offset += 2; + + tmp = RADEON_READ(RADEON_MEM_SDRAM_MODE_REG); + tmp &= RADEON_SDRAM_MODE_MASK; + tmp |= or_mask; + RADEON_WRITE(RADEON_MEM_SDRAM_MODE_REG, tmp); + + or_mask = val << 24; + tmp = RADEON_READ(RADEON_MEM_SDRAM_MODE_REG); + tmp &= RADEON_B3MEM_RESET_MASK; + tmp |= or_mask; + RADEON_WRITE(RADEON_MEM_SDRAM_MODE_REG, tmp); + } + val = radeon_bios8(dev_priv, offset); + } + } +} + +void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable) +{ + uint16_t dyn_clk_info = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); + + if (dyn_clk_info) + combios_parse_pll_table(dev, dyn_clk_info); +} + +void radeon_combios_asic_init(struct drm_device *dev) +{ + uint16_t table; + + /* ASIC INIT 1 */ + table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE); + if (table) + combios_parse_mmio_table(dev, table); + + /* PLL INIT */ + table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE); + if (table) + 
combios_parse_pll_table(dev, table); + + /* ASIC INIT 2 */ + table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE); + if (table) + combios_parse_mmio_table(dev, table); + + /* ASIC INIT 4 */ + table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE); + if (table) + combios_parse_mmio_table(dev, table); + + /* RAM RESET */ + table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE); + if (table) + combios_parse_ram_reset_table(dev, table); + + /* ASIC INIT 3 */ + table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE); + if (table) + combios_parse_mmio_table(dev, table); + + /* DYN CLK 1 */ + table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); + if (table) + combios_parse_pll_table(dev, table); + + /* ASIC INIT 5 */ + table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_5_TABLE); + if (table) + combios_parse_mmio_table(dev, table); + +} diff --git a/linux-core/radeon_connectors.c b/linux-core/radeon_connectors.c index 344b4f77..32f969ab 100644 --- a/linux-core/radeon_connectors.c +++ b/linux-core/radeon_connectors.c @@ -36,14 +36,14 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) int ret = 0; struct edid *edid; - avivo_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector, 1); edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - avivo_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector, 0); if (edid) { drm_mode_connector_update_edid_property(&radeon_connector->base, edid); ret = drm_add_edid_modes(&radeon_connector->base, edid); kfree(edid); - return 0; + return ret; } #if 0 @@ -53,7 +53,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) return ret; radeon_encoder_update_panel_size(lvds_encoder, connector); -#endif +#endif return ret; } @@ -111,33 +111,33 @@ struct drm_connector_funcs radeon_lvds_connector_funcs = { .destroy = radeon_connector_destroy, }; -static int radeon_atom_vga_get_modes(struct drm_connector *connector) +static int radeon_vga_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); int ret; ret = radeon_ddc_get_modes(radeon_connector); - + return ret; } -static int radeon_atom_vga_mode_valid(struct drm_connector *connector, +static int radeon_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { return MODE_OK; } -static enum drm_connector_status radeon_atom_vga_detect(struct drm_connector *connector) +static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) { struct edid *edid; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; - avivo_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector, 1); edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - avivo_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector, 0); if (edid) { kfree(edid); return connector_status_connected; @@ -152,20 +152,20 @@ static enum drm_connector_status radeon_atom_vga_detect(struct drm_connector *co return encoder_funcs->detect(encoder, connector); } -struct drm_connector_helper_funcs radeon_atom_vga_connector_helper_funcs = { - .get_modes = radeon_atom_vga_get_modes, - .mode_valid = radeon_atom_vga_mode_valid, +struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = { + .get_modes = radeon_vga_get_modes, + .mode_valid = radeon_vga_mode_valid, 
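+    /* analog VGA has a single DAC encoder, so the one bound encoder is always chosen */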
.best_encoder = radeon_best_single_encoder, }; -struct drm_connector_funcs radeon_atom_vga_connector_funcs = { - .detect = radeon_atom_vga_detect, +struct drm_connector_funcs radeon_vga_connector_funcs = { + .detect = radeon_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_connector_destroy, }; -static enum drm_connector_status radeon_atom_dvi_detect(struct drm_connector *connector) +static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) { struct edid *edid; struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -175,9 +175,9 @@ static enum drm_connector_status radeon_atom_dvi_detect(struct drm_connector *co int i; enum drm_connector_status ret; - avivo_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector, 1); edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - avivo_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector, 0); if (edid) { /* if the monitor is digital - set the bits */ if (edid->digital) @@ -212,7 +212,7 @@ static enum drm_connector_status radeon_atom_dvi_detect(struct drm_connector *co } /* okay need to be smart in here about which encoder to pick */ -struct drm_encoder *radeon_atom_dvi_encoder(struct drm_connector *connector) +struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -253,14 +253,14 @@ struct drm_encoder *radeon_atom_dvi_encoder(struct drm_connector *connector) return NULL; } -struct drm_connector_helper_funcs radeon_atom_dvi_connector_helper_funcs = { - .get_modes = radeon_atom_vga_get_modes, - .mode_valid = radeon_atom_vga_mode_valid, - .best_encoder = radeon_atom_dvi_encoder, +struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { + .get_modes = radeon_vga_get_modes, + .mode_valid = radeon_vga_mode_valid, + .best_encoder = radeon_dvi_encoder, }; -struct drm_connector_funcs radeon_atom_dvi_connector_funcs = { - .detect = radeon_atom_dvi_detect, +struct drm_connector_funcs radeon_dvi_connector_funcs = { + .detect = radeon_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_connector_destroy, }; @@ -272,12 +272,12 @@ static struct connector_funcs { struct drm_connector_helper_funcs *helper_funcs; int conn_type; char *i2c_id; -} connector_fns[] = { +} connector_fns[] = { { CONNECTOR_NONE, NULL, NULL, DRM_MODE_CONNECTOR_Unknown }, - { CONNECTOR_VGA, &radeon_atom_vga_connector_funcs, &radeon_atom_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA , "VGA"}, + { CONNECTOR_VGA, &radeon_vga_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA , "VGA"}, { CONNECTOR_LVDS, &radeon_lvds_connector_funcs, &radeon_lvds_connector_helper_funcs, DRM_MODE_CONNECTOR_LVDS, "LVDS" }, - { CONNECTOR_DVI_A, &radeon_atom_vga_connector_funcs, &radeon_atom_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_DVIA, "DVI" }, - { CONNECTOR_DVI_I, &radeon_atom_dvi_connector_funcs, &radeon_atom_dvi_connector_helper_funcs, DRM_MODE_CONNECTOR_DVII, "DVI" }, + { CONNECTOR_DVI_A, &radeon_vga_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_DVIA, "DVI" }, + { CONNECTOR_DVI_I, &radeon_dvi_connector_funcs, &radeon_dvi_connector_helper_funcs, DRM_MODE_CONNECTOR_DVII, "DVI" }, #if 0 { CONNECTOR_DVI_D, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, @@ -294,14 +294,14 @@ static 
struct connector_funcs { { CONNECTOR_DIN, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, { CONNECTOR_DISPLAY_PORT, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, #endif -}; - +}; + struct drm_connector *radeon_connector_add(struct drm_device *dev, int bios_index) { struct radeon_connector *radeon_connector; struct drm_radeon_private *dev_priv = dev->dev_private; struct radeon_mode_info *mode_info = &dev_priv->mode_info; - struct drm_connector *connector; + struct drm_connector *connector; int table_idx; for (table_idx = 0; table_idx < ARRAY_SIZE(connector_fns); table_idx++) { @@ -323,7 +323,7 @@ struct drm_connector *radeon_connector_add(struct drm_device *dev, int bios_inde connector_fns[table_idx].conn_type); drm_connector_helper_add(&radeon_connector->base, connector_fns[table_idx].helper_funcs); - + if (mode_info->bios_connector[bios_index].ddc_i2c.valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, &mode_info->bios_connector[bios_index].ddc_i2c, connector_fns[table_idx].i2c_id); diff --git a/linux-core/radeon_cs.c b/linux-core/radeon_cs.c new file mode 120000 index 00000000..0e3fa8ee --- /dev/null +++ b/linux-core/radeon_cs.c @@ -0,0 +1 @@ +../shared-core/radeon_cs.c
\ No newline at end of file diff --git a/linux-core/radeon_cursor.c b/linux-core/radeon_cursor.c new file mode 100644 index 00000000..e71b325d --- /dev/null +++ b/linux-core/radeon_cursor.c @@ -0,0 +1,243 @@ +/* + * Copyright 2007-8 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + */ +#include "drmP.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + +static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) +{ + struct drm_radeon_private *dev_priv = crtc->dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + uint32_t cur_lock; + + if (radeon_is_avivo(dev_priv)) { + cur_lock = RADEON_READ(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); + if (lock) + cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; + else + cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK; + RADEON_WRITE(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock); + } else { + switch(radeon_crtc->crtc_id) { + case 0: + cur_lock = RADEON_READ(RADEON_CUR_OFFSET); + if (lock) + cur_lock |= RADEON_CUR_LOCK; + else + cur_lock &= ~RADEON_CUR_LOCK; + RADEON_WRITE(RADEON_CUR_OFFSET, cur_lock); + break; + case 1: + cur_lock = RADEON_READ(RADEON_CUR2_OFFSET); + if (lock) + cur_lock |= RADEON_CUR2_LOCK; + else + cur_lock &= ~RADEON_CUR2_LOCK; + RADEON_WRITE(RADEON_CUR2_OFFSET, cur_lock); + break; + default: + break; + } + } +} + +static void radeon_hide_cursor(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_radeon_private *dev_priv = crtc->dev->dev_private; + + if (radeon_is_avivo(dev_priv)) { + RADEON_WRITE(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); + RADEON_WRITE_P(RADEON_MM_DATA, 0, ~AVIVO_D1CURSOR_EN); + } else { + switch(radeon_crtc->crtc_id) { + case 0: + RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); + break; + case 1: + RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); + break; + default: + return; + } + RADEON_WRITE_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN); + } +} + +static void radeon_show_cursor(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_radeon_private *dev_priv = crtc->dev->dev_private; + + if (radeon_is_avivo(dev_priv)) { + RADEON_WRITE(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); + RADEON_WRITE(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | + (AVIVO_D1CURSOR_MODE_24BPP << 
AVIVO_D1CURSOR_MODE_SHIFT));
+    } else {
+        switch (radeon_crtc->crtc_id) {
+        case 0:
+            RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+            break;
+        case 1:
+            RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+            break;
+        default:
+            return;
+        }
+
+        RADEON_WRITE_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+                       (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+                       ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+    }
+}
+
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+                              uint32_t width, uint32_t height)
+{
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+    struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
+    struct drm_radeon_gem_object *obj_priv;
+
+    obj_priv = obj->driver_private;
+
+    if (radeon_is_avivo(dev_priv)) {
+        RADEON_WRITE(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                     dev_priv->fb_location + obj_priv->bo->offset);
+        RADEON_WRITE(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+                     (width - 1) << 16 | (height - 1));
+    } else {
+        switch (radeon_crtc->crtc_id) {
+        case 0:
+            /* offset is from DISP_BASE_ADDRESS */
+            RADEON_WRITE(RADEON_CUR_OFFSET, obj_priv->bo->offset);
+            break;
+        case 1:
+            /* offset is from DISP2_BASE_ADDRESS */
+            RADEON_WRITE(RADEON_CUR2_OFFSET, obj_priv->bo->offset);
+            break;
+        default:
+            break;
+        }
+    }
+}
+
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+                           struct drm_file *file_priv,
+                           uint32_t handle,
+                           uint32_t width,
+                           uint32_t height)
+{
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+    struct drm_gem_object *obj;
+
+    if (!handle) {
+        /* turn off cursor */
+        radeon_hide_cursor(crtc);
+        return 0;
+    }
+
+    /* check the size before taking a reference, so the error path can't leak one */
+    if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+        DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+        return -EINVAL;
+    }
+
+    obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+    if (!obj) {
+        DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+        return -EINVAL;
+    }
+
+    radeon_lock_cursor(crtc, true);
+    /* XXX only 27 bit offset for legacy cursor */
+    radeon_set_cursor(crtc, obj, width, height);
+    radeon_show_cursor(crtc);
+    radeon_lock_cursor(crtc, false);
+
+    mutex_lock(&crtc->dev->struct_mutex);
+    drm_gem_object_unreference(obj);
+    mutex_unlock(&crtc->dev->struct_mutex);
+
+    return 0;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+                            int x, int y)
+{
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+    struct drm_radeon_private *dev_priv = crtc->dev->dev_private;
+    int xorigin = 0, yorigin = 0;
+
+    if (x < 0)
+        xorigin = -x + 1;
+    if (y < 0)
+        yorigin = -y + 1;
+    if (xorigin >= CURSOR_WIDTH)
+        xorigin = CURSOR_WIDTH - 1;
+    if (yorigin >= CURSOR_HEIGHT)
+        yorigin = CURSOR_HEIGHT - 1;
+
+    radeon_lock_cursor(crtc, true);
+    if (radeon_is_avivo(dev_priv)) {
+        /* a cursor hanging off the top/left edge keeps position 0 and moves the hot spot instead */
+        RADEON_WRITE(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
+                     ((xorigin ? 0 : x) << 16) |
+                     (yorigin ? 0 : y));
+        RADEON_WRITE(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+    } else {
+        if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+            y /= 2;
+        else if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+            y *= 2;
+
+        switch (radeon_crtc->crtc_id) {
+        case 0:
+            RADEON_WRITE(RADEON_CUR_HORZ_VERT_OFF, (RADEON_CUR_LOCK
+                                                    | (xorigin << 16)
+                                                    | yorigin));
+            RADEON_WRITE(RADEON_CUR_HORZ_VERT_POSN, (RADEON_CUR_LOCK
+                                                     | ((xorigin ? 0 : x) << 16)
+                                                     | (yorigin ?
0 : y))); + break; + case 1: + RADEON_WRITE(RADEON_CUR2_HORZ_VERT_OFF, (RADEON_CUR2_LOCK + | (xorigin << 16) + | yorigin)); + RADEON_WRITE(RADEON_CUR2_HORZ_VERT_POSN, (RADEON_CUR2_LOCK + | ((xorigin ? 0 : x) << 16) + | (yorigin ? 0 : y))); + break; + default: + break; + } + + } + radeon_lock_cursor(crtc, false); + + return 0; +} + diff --git a/linux-core/radeon_display.c b/linux-core/radeon_display.c index 7a7b5856..39eb91eb 100644 --- a/linux-core/radeon_display.c +++ b/linux-core/radeon_display.c @@ -32,9 +32,6 @@ #include "drm_crtc_helper.h" -#define CURSOR_WIDTH 64 -#define CURSOR_HEIGHT 64 - int radeon_ddc_dump(struct drm_connector *connector); @@ -48,11 +45,11 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) DRM_DEBUG("%d\n", radeon_crtc->crtc_id); RADEON_WRITE(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0); - + RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); - + RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); @@ -62,7 +59,6 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) RADEON_WRITE(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f); for (i = 0; i < 256; i++) { - RADEON_WRITE8(AVIVO_DC_LUT_RW_INDEX, i); RADEON_WRITE(AVIVO_DC_LUT_30_COLOR, (radeon_crtc->lut_r[i] << 22) | @@ -73,34 +69,43 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) RADEON_WRITE(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); } - -void radeon_crtc_load_lut(struct drm_crtc *crtc) +static void legacy_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_radeon_private *dev_priv = dev->dev_private; - u32 temp; int i; - if (!crtc->enabled) - return; + uint32_t dac2_cntl; - - if (radeon_is_avivo(dev_priv)) { - avivo_crtc_load_lut(crtc); - return; - } - - temp = RADEON_READ(RADEON_DAC_CNTL2); + dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2); if (radeon_crtc->crtc_id == 0) - temp &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL; + dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL; else - temp |= RADEON_DAC2_PALETTE_ACC_CTL; - RADEON_WRITE(RADEON_DAC_CNTL2, temp); + dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL; + RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); for (i = 0; i < 256; i++) { -// OUTPAL(i, radeon_crtc->lut_r[i], radeon_crtc->lut_g[i], radeon_crtc->lut_b[i]); + RADEON_WRITE8(RADEON_PALETTE_INDEX, i); + RADEON_WRITE(RADEON_PALETTE_DATA, + (radeon_crtc->lut_r[i] << 16) | + (radeon_crtc->lut_g[i] << 8) | + (radeon_crtc->lut_b[i] << 0)); } +} +void radeon_crtc_load_lut(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + + if (!crtc->enabled) + return; + + if (radeon_is_avivo(dev_priv)) + avivo_crtc_load_lut(crtc); + else + legacy_crtc_load_lut(crtc); } /** Sets the color ramps on behalf of RandR */ @@ -116,140 +121,32 @@ void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, radeon_crtc->lut_b[regno] = blue >> 8; } -void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) -{ -} - - - -static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct 
drm_display_mode *adjusted_mode) -{ - return true; -} - -static void radeon_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y) -{ - -} - -void radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y) -{ -} - -static void radeon_crtc_prepare(struct drm_crtc *crtc) -{ -} - -static void radeon_crtc_commit(struct drm_crtc *crtc) -{ -} - -static void avivo_lock_cursor(struct drm_crtc *crtc, bool lock) -{ - struct drm_radeon_private *dev_priv = crtc->dev->dev_private; - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - - uint32_t tmp; - - tmp = RADEON_READ(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); - if (lock) - tmp |= AVIVO_D1CURSOR_UPDATE_LOCK; - else - tmp &= ~AVIVO_D1CURSOR_UPDATE_LOCK; - - RADEON_WRITE(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, tmp); -} - -static int radeon_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_radeon_private *dev_priv = crtc->dev->dev_private; - struct drm_gem_object *obj; - struct drm_radeon_gem_object *obj_priv; - - if (!handle) { - RADEON_WRITE(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset, 0); - return 0; - /* turn off cursor */ - } - - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); - if (!obj) { - DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); - return -EINVAL; - } - - obj_priv = obj->driver_private; - - RADEON_WRITE(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset, 0); - if (radeon_is_avivo(dev_priv)) { - RADEON_WRITE(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, - dev_priv->fb_location + obj_priv->bo->offset); - RADEON_WRITE(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, - (CURSOR_WIDTH - 1) << 16 | (CURSOR_HEIGHT - 1)); - RADEON_WRITE(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset, - AVIVO_D1CURSOR_EN | (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); - } - - mutex_lock(&crtc->dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&crtc->dev->struct_mutex); - - return 0; -} - -static int radeon_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_radeon_private *dev_priv = crtc->dev->dev_private; - int xorigin = 0, yorigin = 0; - - if (x < 0) xorigin = -x+1; - if (y < 0) yorigin = -x+1; - if (xorigin >= CURSOR_WIDTH) xorigin = CURSOR_WIDTH - 1; - if (yorigin >= CURSOR_WIDTH) yorigin = CURSOR_WIDTH - 1; - - if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) - y /= 2; - else if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN) - y *= 2; - - if (radeon_is_avivo(dev_priv)) { - avivo_lock_cursor(crtc, true); - - RADEON_WRITE(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0: x) << 16) | - (yorigin ? 
0 : y));
-        RADEON_WRITE(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
-        avivo_lock_cursor(crtc, false);
-    }
-
-    return 0;
-}
-
 static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                   u16 *blue, uint32_t size)
 {
     struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-    int i;
+    int i, j;
 
     if (size != 256)
         return;
 
-    for (i = 0; i < 256; i++) {
-        radeon_crtc->lut_r[i] = red[i] >> 8;
-        radeon_crtc->lut_g[i] = green[i] >> 8;
-        radeon_crtc->lut_b[i] = blue[i] >> 8;
+    if (crtc->fb->depth == 16) {
+        /* 5-6-5: the 32 red/blue and 64 green ramp entries are replicated across the 256-entry LUT */
+        for (i = 0; i < 64; i++) {
+            if (i <= 31) {
+                for (j = 0; j < 8; j++) {
+                    radeon_crtc->lut_r[i * 8 + j] = red[i] >> 8;
+                    radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 8;
+                }
+            }
+            for (j = 0; j < 4; j++)
+                radeon_crtc->lut_g[i * 4 + j] = green[i] >> 8;
+        }
+    } else {
+        for (i = 0; i < 256; i++) {
+            radeon_crtc->lut_r[i] = red[i] >> 8;
+            radeon_crtc->lut_g[i] = green[i] >> 8;
+            radeon_crtc->lut_b[i] = blue[i] >> 8;
+        }
     }
 
     radeon_crtc_load_lut(crtc);
@@ -263,15 +160,6 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
     kfree(radeon_crtc);
 }
 
-static const struct drm_crtc_helper_funcs radeon_helper_funcs = {
-    .dpms = radeon_crtc_dpms,
-    .mode_fixup = radeon_crtc_mode_fixup,
-    .mode_set = radeon_crtc_mode_set,
-    .mode_set_base = radeon_crtc_set_base,
-    .prepare = radeon_crtc_prepare,
-    .commit = radeon_crtc_commit,
-};
-
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
     .cursor_set = radeon_crtc_cursor_set,
     .cursor_move = radeon_crtc_cursor_move,
@@ -309,7 +197,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
     if (dev_priv->is_atom_bios && dev_priv->chip_family > CHIP_RS690)
         radeon_atombios_init_crtc(dev, radeon_crtc);
     else
-        drm_crtc_helper_add(&radeon_crtc->base, &radeon_helper_funcs);
+        radeon_legacy_init_crtc(dev, radeon_crtc);
 }
 
 bool radeon_legacy_setup_enc_conn(struct drm_device *dev)
@@ -360,8 +248,14 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
         if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) ||
             (mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_A) ||
             (mode_info->bios_connector[i].connector_type == CONNECTOR_VGA)) {
-            if (radeon_is_avivo(dev_priv))
+            if (radeon_is_avivo(dev_priv)) {
                 encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 0);
+            } else {
+                if (mode_info->bios_connector[i].dac_type == DAC_PRIMARY)
+                    encoder = radeon_encoder_legacy_primary_dac_add(dev, i, 0);
+                else if (mode_info->bios_connector[i].dac_type == DAC_TVDAC)
+                    encoder = radeon_encoder_legacy_tv_dac_add(dev, i, 0);
+            }
             if (encoder)
                 drm_mode_connector_attach_encoder(connector, encoder);
         }
@@ -370,7 +264,13 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
         if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) ||
             (mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_D)) {
             if (radeon_is_avivo(dev_priv))
-                encoder = radeon_encoder_atom_tmds_add(dev, i, mode_info->bios_connector[i].dac_type);
+                encoder = radeon_encoder_atom_tmds_add(dev, i, mode_info->bios_connector[i].tmds_type);
+            else {
+                if (mode_info->bios_connector[i].tmds_type == TMDS_INT)
+                    encoder = radeon_encoder_legacy_tmds_int_add(dev, i);
+                else if (mode_info->bios_connector[i].tmds_type == TMDS_EXT)
+                    encoder = radeon_encoder_legacy_tmds_ext_add(dev, i);
+            }
             if (encoder)
                 drm_mode_connector_attach_encoder(connector, encoder);
         }
@@ -379,6 +279,10 @@ bool radeon_setup_enc_conn(struct drm_device *dev)
         if (mode_info->bios_connector[i].connector_type == CONNECTOR_DIN) {
             if
(radeon_is_avivo(dev_priv)) encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 1); + else { + if (mode_info->bios_connector[i].dac_type == DAC_TVDAC) + encoder = radeon_encoder_legacy_tv_dac_add(dev, i, 0); + } if (encoder) drm_mode_connector_attach_encoder(connector, encoder); } @@ -389,31 +293,6 @@ bool radeon_setup_enc_conn(struct drm_device *dev) return true; } - - -void avivo_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) -{ - struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private; - uint32_t temp; - struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec; - - temp = RADEON_READ(rec->mask_clk_reg); - if (lock_state) - temp |= rec->put_clk_mask; - else - temp &= ~rec->put_clk_mask; - RADEON_WRITE(rec->mask_clk_reg, temp); - temp = RADEON_READ(rec->mask_clk_reg); - - temp = RADEON_READ(rec->mask_data_reg); - if (lock_state) - temp |= rec->put_data_mask; - else - temp &= ~rec->put_data_mask; - RADEON_WRITE(rec->mask_data_reg, temp); - temp = RADEON_READ(rec->mask_data_reg); -} - int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) { struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private; @@ -422,12 +301,9 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) if (!radeon_connector->ddc_bus) return -1; - - if (radeon_is_avivo(dev_priv)) - avivo_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector, 1); edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - if (radeon_is_avivo(dev_priv)) - avivo_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector, 0); if (edid) { drm_mode_connector_update_edid_property(&radeon_connector->base, edid); ret = drm_add_edid_modes(&radeon_connector->base, edid); @@ -445,7 +321,9 @@ int radeon_ddc_dump(struct drm_connector *connector) if (!radeon_connector->ddc_bus) return -1; + radeon_i2c_do_lock(radeon_connector, 1); edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); + radeon_i2c_do_lock(radeon_connector, 0); if (edid) { kfree(edid); } @@ -456,7 +334,7 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) { uint64_t x, y, result; uint64_t mod; - + n += d / 2; mod = do_div(n, d); @@ -542,7 +420,7 @@ void radeon_compute_pll(struct radeon_pll *pll, current_freq = radeon_div((uint64_t)pll->reference_freq * 10000 * feedback_div, ref_div * post_div); - + error = abs(current_freq - freq); vco_diff = abs(vco - best_vco); @@ -573,7 +451,7 @@ void radeon_compute_pll(struct radeon_pll *pll, best_vco_diff = vco_diff; } } - + if (current_freq < freq) min_feed_div = feedback_div+1; else @@ -581,7 +459,7 @@ void radeon_compute_pll(struct radeon_pll *pll, } } } - + *dot_clock_p = best_freq / 10000; *fb_div_p = best_feedback_div; *ref_div_p = best_ref_div; @@ -703,7 +581,7 @@ int radeon_modeset_init(struct drm_device *dev) if (!ret) return ret; - + drm_helper_initial_config(dev, false); return 0; diff --git a/linux-core/radeon_encoders.c b/linux-core/radeon_encoders.c index 1b75bd6a..04c57092 100644 --- a/linux-core/radeon_encoders.c +++ b/linux-core/radeon_encoders.c @@ -30,9 +30,9 @@ extern int atom_debug; -static void radeon_rmx_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +void radeon_rmx_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); if 
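+    /* modes smaller than the panel's native size are routed through the RMX scaler */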
(mode->hdisplay < radeon_encoder->panel_xres || @@ -279,7 +279,7 @@ static void radeon_lvtma_dpms(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_ON: atombios_display_device_control(encoder, index, ATOM_ENABLE); break; - case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_display_device_control(encoder, index, ATOM_DISABLE); @@ -319,7 +319,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_lvtma_helper_funcs = { .commit = radeon_lvtma_commit, }; -static void radeon_enc_destroy(struct drm_encoder *encoder) +void radeon_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); drm_encoder_cleanup(encoder); @@ -792,7 +792,7 @@ static void radeon_atom_tmds_dpms(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_ON: atombios_display_device_control(encoder, index, ATOM_ENABLE); break; - case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_display_device_control(encoder, index, ATOM_DISABLE); @@ -895,68 +895,3 @@ struct drm_encoder *radeon_encoder_atom_tmds_add(struct drm_device *dev, int bio return encoder; } -static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) -{ - struct drm_device *dev = encoder->dev; -} - -static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) -{ - radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); -} - -static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) -{ - radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON); -} - -static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - - -} - -static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { - .dpms = radeon_legacy_lvds_dpms, - .mode_fixup = radeon_lvtma_mode_fixup, - .prepare = radeon_legacy_lvds_prepare, - .mode_set = radeon_legacy_lvds_mode_set, - .commit = radeon_legacy_lvds_commit, -}; - - -static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { - .destroy = radeon_enc_destroy, -}; - -struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index) -{ - struct drm_radeon_private *dev_priv = dev->dev_private; - struct radeon_mode_info *mode_info = &dev_priv->mode_info; - struct radeon_encoder *radeon_encoder; - struct drm_encoder *encoder; - radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); - if (!radeon_encoder) { - return NULL; - } - - encoder = &radeon_encoder->base; - - encoder->possible_crtcs = 0x3; - encoder->possible_clones = 0; - drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); - - drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); - - /* TODO get the LVDS info from the BIOS for panel size etc. 
*/ - /* get the lvds info from the bios */ - radeon_combios_get_lvds_info(radeon_encoder); - - /* LVDS gets default RMX full scaling */ - radeon_encoder->rmx_type = RMX_FULL; - - return encoder; -} diff --git a/linux-core/radeon_gem.c b/linux-core/radeon_gem.c index d536aed2..6a7529cd 100644 --- a/linux-core/radeon_gem.c +++ b/linux-core/radeon_gem.c @@ -27,6 +27,11 @@ #include "radeon_drm.h" #include "radeon_drv.h" +static int radeon_gem_ib_init(struct drm_device *dev); +static int radeon_gem_ib_destroy(struct drm_device *dev); +static int radeon_gem_dma_bufs_init(struct drm_device *dev); +static void radeon_gem_dma_bufs_destroy(struct drm_device *dev); + int radeon_gem_init_object(struct drm_gem_object *obj) { struct drm_radeon_gem_object *obj_priv; @@ -240,9 +245,11 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, DRM_DEBUG("got here %p %p %d\n", obj, obj_priv->bo, atomic_read(&obj_priv->bo->usage)); /* validate into a pin with no fence */ - ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT, - DRM_BO_HINT_DONT_FENCE, - 0, NULL); + if (!(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) { + ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT, + DRM_BO_HINT_DONT_FENCE, 0); + } else + ret = 0; args->offset = obj_priv->bo->offset; DRM_DEBUG("got here %p %p\n", obj, obj_priv->bo); @@ -270,8 +277,7 @@ int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, /* validate into a pin with no fence */ ret = drm_bo_do_validate(obj_priv->bo, DRM_BO_FLAG_NO_EVICT, DRM_BO_FLAG_NO_EVICT, - DRM_BO_HINT_DONT_FENCE, - 0, NULL); + DRM_BO_HINT_DONT_FENCE, 0); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(obj); @@ -315,7 +321,7 @@ int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data, //VB_AGE_TEST_WITH_RETURN(dev_priv); ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT, - 0 , 0, NULL); + 0 , 0); if (ret) return ret; @@ -481,6 +487,7 @@ static int radeon_gart_init(struct drm_device *dev) /* setup a 32MB GART */ dev_priv->gart_size = dev_priv->mm.gart_size; + dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; #if __OS_HAS_AGP /* setup VRAM vs GART here */ @@ -513,19 +520,19 @@ static int radeon_gart_init(struct drm_device *dev) ret = drm_buffer_object_create(dev, RADEON_PCIGART_TABLE_SIZE, drm_bo_type_kernel, DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT, - 0, 1, 0, &dev_priv->mm.pcie_table); + 0, 1, 0, &dev_priv->mm.pcie_table.bo); if (ret) return -EINVAL; - DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table, dev_priv->mm.pcie_table->offset); - ret = drm_bo_kmap(dev_priv->mm.pcie_table, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT, - &dev_priv->mm.pcie_table_map); + DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table.bo, dev_priv->mm.pcie_table.bo->offset); + ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT, + &dev_priv->mm.pcie_table.kmap); if (ret) return -EINVAL; dev_priv->pcigart_offset_set = 2; - dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table->offset; - dev_priv->gart_info.addr = dev_priv->mm.pcie_table_map.virtual; + dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset; + dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual; dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB; memset(dev_priv->gart_info.addr, 0, RADEON_PCIGART_TABLE_SIZE); @@ 
-543,8 +550,8 @@ static int radeon_gart_init(struct drm_device *dev) dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; else dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; - dev_priv->gart_info.addr = NULL; - dev_priv->gart_info.bus_addr = 0; + dev_priv->gart_info.addr = dev_priv->gart_info.table_handle->vaddr; + dev_priv->gart_info.bus_addr = dev_priv->gart_info.table_handle->busaddr; } /* gart values setup - start the GART */ @@ -566,14 +573,14 @@ int radeon_alloc_gart_objects(struct drm_device *dev) drm_bo_type_kernel, DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT, - 0, 1, 0, &dev_priv->mm.ring); + 0, 1, 0, &dev_priv->mm.ring.bo); if (ret) { DRM_ERROR("failed to allocate ring\n"); return -EINVAL; } - ret = drm_bo_kmap(dev_priv->mm.ring, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT, - &dev_priv->mm.ring_map); + ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT, + &dev_priv->mm.ring.kmap); if (ret) { DRM_ERROR("failed to map ring\n"); return -EINVAL; @@ -583,39 +590,132 @@ int radeon_alloc_gart_objects(struct drm_device *dev) drm_bo_type_kernel, DRM_BO_FLAG_WRITE |DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT, - 0, 1, 0, &dev_priv->mm.ring_read_ptr); + 0, 1, 0, &dev_priv->mm.ring_read.bo); if (ret) { DRM_ERROR("failed to allocate ring read\n"); return -EINVAL; } - ret = drm_bo_kmap(dev_priv->mm.ring_read_ptr, 0, + ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0, PAGE_SIZE >> PAGE_SHIFT, - &dev_priv->mm.ring_read_ptr_map); + &dev_priv->mm.ring_read.kmap); if (ret) { DRM_ERROR("failed to map ring read\n"); return -EINVAL; } DRM_DEBUG("Ring ptr %p mapped at %d %p, read ptr %p maped at %d %p\n", - dev_priv->mm.ring, dev_priv->mm.ring->offset, dev_priv->mm.ring_map.virtual, - dev_priv->mm.ring_read_ptr, dev_priv->mm.ring_read_ptr->offset, dev_priv->mm.ring_read_ptr_map.virtual); + dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual, + dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual); + /* init the indirect buffers */ + radeon_gem_ib_init(dev); + radeon_gem_dma_bufs_init(dev); return 0; } +static void radeon_init_memory_map(struct drm_device *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + u32 mem_size, aper_size; + + dev_priv->mc_fb_location = radeon_read_fb_location(dev_priv); + radeon_read_agp_location(dev_priv, &dev_priv->mc_agp_loc_lo, &dev_priv->mc_agp_loc_hi); + + if (dev_priv->chip_family >= CHIP_R600) { + mem_size = RADEON_READ(R600_CONFIG_MEMSIZE); + aper_size = RADEON_READ(R600_CONFIG_APER_SIZE); + } else { + mem_size = RADEON_READ(RADEON_CONFIG_MEMSIZE); + aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE); + } + + /* M6s report illegal memory size */ + if (mem_size == 0) + mem_size = 8 * 1024 * 1024; + + /* for RN50/M6/M7 - Novell bug 204882 */ + if (aper_size > mem_size) + mem_size = aper_size; + + if ((dev_priv->chip_family != CHIP_RS600) && + (dev_priv->chip_family != CHIP_RS690) && + (dev_priv->chip_family != CHIP_RS740)) { + if (dev_priv->flags & RADEON_IS_IGP) + dev_priv->mc_fb_location = RADEON_READ(RADEON_NB_TOM); + else { + uint32_t aper0_base; + + if (dev_priv->chip_family >= CHIP_R600) + aper0_base = RADEON_READ(R600_CONFIG_F0_BASE); + else + aper0_base = RADEON_READ(RADEON_CONFIG_APER_0_BASE); + + + /* Some chips have an "issue" with the memory controller, the + * location must be aligned to the size. 
We just align it down, + * too bad if we walk over the top of system memory, we don't + * use DMA without a remapped anyway. + * Affected chips are rv280, all r3xx, and all r4xx, but not IGP + */ + if (dev_priv->chip_family == CHIP_RV280 || + dev_priv->chip_family == CHIP_R300 || + dev_priv->chip_family == CHIP_R350 || + dev_priv->chip_family == CHIP_RV350 || + dev_priv->chip_family == CHIP_RV380 || + dev_priv->chip_family == CHIP_R420 || + dev_priv->chip_family == CHIP_RV410) + aper0_base &= ~(mem_size - 1); + + if (dev_priv->chip_family >= CHIP_R600) { + dev_priv->mc_fb_location = (aper0_base >> 24) | + (((aper0_base + mem_size - 1) & 0xff000000U) >> 8); + } else { + dev_priv->mc_fb_location = (aper0_base >> 16) | + ((aper0_base + mem_size - 1) & 0xffff0000U); + } + } + } + + if (dev_priv->chip_family >= CHIP_R600) + dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 24; + else + dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 16; + + if (radeon_is_avivo(dev_priv)) { + if (dev_priv->chip_family >= CHIP_R600) + RADEON_WRITE(R600_HDP_NONSURFACE_BASE, (dev_priv->mc_fb_location << 16) & 0xff0000); + else + RADEON_WRITE(AVIVO_HDP_FB_LOCATION, dev_priv->mc_fb_location); + } + + radeon_write_fb_location(dev_priv, dev_priv->mc_fb_location); + + dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; + dev_priv->fb_size = + ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) + - dev_priv->fb_location; + +} + /* init memory manager - start with all of VRAM and a 32MB GART aperture for now */ int radeon_gem_mm_init(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; int ret; + u32 pg_offset; /* size the mappable VRAM memory for now */ radeon_vram_setup(dev); - drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, /*dev_priv->mm.vram_offset >> PAGE_SHIFT,*/ - (dev_priv->mm.vram_visible) >> PAGE_SHIFT, + radeon_init_memory_map(dev); + +#define VRAM_RESERVE_TEXT (64*1024) + dev_priv->mm.vram_visible -= VRAM_RESERVE_TEXT; + pg_offset = VRAM_RESERVE_TEXT >> PAGE_SHIFT; + drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, pg_offset, /*dev_priv->mm.vram_offset >> PAGE_SHIFT,*/ + ((dev_priv->mm.vram_visible) >> PAGE_SHIFT) - 16, 0); @@ -634,6 +734,8 @@ int radeon_gem_mm_init(struct drm_device *dev) ret = radeon_alloc_gart_objects(dev); if (ret) return -EINVAL; + + dev_priv->mm_enabled = true; return 0; } @@ -641,16 +743,20 @@ void radeon_gem_mm_fini(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; + radeon_gem_dma_bufs_destroy(dev); + radeon_gem_ib_destroy(dev); + mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.ring_read_ptr) { - drm_bo_kunmap(&dev_priv->mm.ring_read_ptr_map); - drm_bo_usage_deref_locked(&dev_priv->mm.ring_read_ptr); + + if (dev_priv->mm.ring_read.bo) { + drm_bo_kunmap(&dev_priv->mm.ring_read.kmap); + drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo); } - if (dev_priv->mm.ring) { - drm_bo_kunmap(&dev_priv->mm.ring_map); - drm_bo_usage_deref_locked(&dev_priv->mm.ring); + if (dev_priv->mm.ring.bo) { + drm_bo_kunmap(&dev_priv->mm.ring.kmap); + drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo); } if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) { @@ -658,9 +764,9 @@ void radeon_gem_mm_fini(struct drm_device *dev) } if (dev_priv->flags & RADEON_IS_PCIE) { - if (dev_priv->mm.pcie_table) { - drm_bo_kunmap(&dev_priv->mm.pcie_table_map); - drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table); + if (dev_priv->mm.pcie_table.bo) { + drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap); + 
drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo); } } @@ -669,6 +775,9 @@ void radeon_gem_mm_fini(struct drm_device *dev) } mutex_unlock(&dev->struct_mutex); + + drm_bo_driver_finish(dev); + dev_priv->mm_enabled = false; } int radeon_gem_object_pin(struct drm_gem_object *obj, @@ -680,8 +789,479 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, obj_priv = obj->driver_private; ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT, - DRM_BO_HINT_DONT_FENCE, 0, NULL); + DRM_BO_HINT_DONT_FENCE, 0); return ret; } +#define RADEON_IB_MEMORY (1*1024*1024) +#define RADEON_IB_SIZE (65536) + +#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE) + +int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset) +{ + int i, index = -1; + int ret; + drm_radeon_private_t *dev_priv = dev->dev_private; + + for (i = 0; i < RADEON_NUM_IB; i++) { + if (!(dev_priv->ib_alloc_bitmap & (1 << i))){ + index = i; + break; + } + } + + /* if all in use we need to wait */ + if (index == -1) { + for (i = 0; i < RADEON_NUM_IB; i++) { + if (dev_priv->ib_alloc_bitmap & (1 << i)) { + mutex_lock(&dev_priv->ib_objs[i]->bo->mutex); + ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0); + mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex); + if (ret) + continue; + dev_priv->ib_alloc_bitmap &= ~(1 << i); + index = i; + break; + } + } + } + + if (index == -1) { + DRM_ERROR("Major case fail to allocate IB from freelist %x\n", dev_priv->ib_alloc_bitmap); + return -EINVAL; + } + + + if (dwords > RADEON_IB_SIZE / sizeof(uint32_t)) + return -EINVAL; + + ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0, + DRM_BO_FLAG_NO_EVICT, + 0, 0); + if (ret) { + DRM_ERROR("Failed to validate IB %d\n", index); + return -EINVAL; + } + + *card_offset = dev_priv->gart_vm_start + dev_priv->ib_objs[index]->bo->offset; + *ib = dev_priv->ib_objs[index]->kmap.virtual; + dev_priv->ib_alloc_bitmap |= (1 << i); + return 0; +} + +static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + struct drm_fence_object *fence; + int ret; + int i; + + for (i = 0; i < RADEON_NUM_IB; i++) { + + if (dev_priv->ib_objs[i]->kmap.virtual == ib) { + /* emit a fence object */ + ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence); + if (ret) { + + drm_putback_buffer_objects(dev); + } + /* dereference the fence object */ + if (fence) + drm_fence_usage_deref_unlocked(&fence); + } + } + +} + +static int radeon_gem_ib_destroy(struct drm_device *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int i; + + if (dev_priv->ib_objs) { + for (i = 0; i < RADEON_NUM_IB; i++) { + if (dev_priv->ib_objs[i]) { + drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap); + drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo); + } + drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER); + } + drm_free(dev_priv->ib_objs, RADEON_NUM_IB*sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER); + } + dev_priv->ib_objs = NULL; + return 0; +} + +static int radeon_gem_relocate(struct drm_device *dev, struct drm_file *file_priv, + uint32_t *reloc, uint32_t *offset) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + /* relocate the handle */ + int domains = reloc[2]; + struct drm_gem_object *obj; + int flags = 0; + int ret; + struct drm_radeon_gem_object *obj_priv; + + obj = drm_gem_object_lookup(dev, file_priv, reloc[1]); + if (!obj) + return false; + + obj_priv = obj->driver_private; + if (domains == RADEON_GEM_DOMAIN_VRAM) { + 
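+        /* translate the GEM placement domain into a buffer-object memory flag for validation */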
flags = DRM_BO_FLAG_MEM_VRAM; + } else { + flags = DRM_BO_FLAG_MEM_TT; + } + + ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM, 0, 0); + if (ret) + return ret; + + if (flags == DRM_BO_FLAG_MEM_VRAM) + *offset = obj_priv->bo->offset + dev_priv->fb_location; + else + *offset = obj_priv->bo->offset + dev_priv->gart_vm_start; + + /* BAD BAD BAD - LINKED LIST THE OBJS and UNREF ONCE IB is SUBMITTED */ + drm_gem_object_unreference(obj); + return 0; +} + +/* allocate 1MB of 64k IBs the the kernel can keep mapped */ +static int radeon_gem_ib_init(struct drm_device *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int i; + int ret; + + dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER); + if (!dev_priv->ib_objs) + goto free_all; + + for (i = 0; i < RADEON_NUM_IB; i++) { + dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER); + if (!dev_priv->ib_objs[i]) + goto free_all; + + ret = drm_buffer_object_create(dev, RADEON_IB_SIZE, + drm_bo_type_kernel, + DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT | + DRM_BO_FLAG_MAPPABLE, 0, + 0, 0, &dev_priv->ib_objs[i]->bo); + if (ret) + goto free_all; + + ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT, + &dev_priv->ib_objs[i]->kmap); + + if (ret) + goto free_all; + } + + dev_priv->ib_alloc_bitmap = 0; + + dev_priv->cs.ib_get = radeon_gem_ib_get; + dev_priv->cs.ib_free = radeon_gem_ib_free; + + radeon_cs_init(dev); + dev_priv->cs.relocate = radeon_gem_relocate; + return 0; + +free_all: + radeon_gem_ib_destroy(dev); + return -ENOMEM; +} + +#define RADEON_DMA_BUFFER_SIZE (64 * 1024) +#define RADEON_DMA_BUFFER_COUNT (16) + + +/** + * Cleanup after an error on one of the addbufs() functions. + * + * \param dev DRM device. + * \param entry buffer entry where the error occurred. + * + * Frees any pages and buffers associated with the given entry. + */ +static void drm_cleanup_buf_error(struct drm_device * dev, + struct drm_buf_entry * entry) +{ + int i; + + if (entry->seg_count) { + for (i = 0; i < entry->seg_count; i++) { + if (entry->seglist[i]) { + drm_pci_free(dev, entry->seglist[i]); + } + } + drm_free(entry->seglist, + entry->seg_count * + sizeof(*entry->seglist), DRM_MEM_SEGS); + + entry->seg_count = 0; + } + + if (entry->buf_count) { + for (i = 0; i < entry->buf_count; i++) { + if (entry->buflist[i].dev_private) { + drm_free(entry->buflist[i].dev_private, + entry->buflist[i].dev_priv_size, + DRM_MEM_BUFS); + } + } + drm_free(entry->buflist, + entry->buf_count * + sizeof(*entry->buflist), DRM_MEM_BUFS); + + entry->buf_count = 0; + } +} + +static int radeon_gem_addbufs(struct drm_device *dev) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct drm_buf *buf; + unsigned long offset; + unsigned long agp_offset; + int count; + int order; + int size; + int alignment; + int page_order; + int total; + int byte_count; + int i; + struct drm_buf **temp_buflist; + + if (!dma) + return -EINVAL; + + count = RADEON_DMA_BUFFER_COUNT; + order = drm_order(RADEON_DMA_BUFFER_SIZE); + size = 1 << order; + + alignment = PAGE_ALIGN(size); + page_order = order - PAGE_SHIFT > 0 ? 
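+    /* e.g. with 4KB pages: drm_order(64KB) = 16, so page_order = 4 and total = 64KB */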
order - PAGE_SHIFT : 0; + total = PAGE_SIZE << page_order; + + byte_count = 0; + agp_offset = dev_priv->mm.dma_bufs.bo->offset; + + DRM_DEBUG("count: %d\n", count); + DRM_DEBUG("order: %d\n", order); + DRM_DEBUG("size: %d\n", size); + DRM_DEBUG("agp_offset: %lu\n", agp_offset); + DRM_DEBUG("alignment: %d\n", alignment); + DRM_DEBUG("page_order: %d\n", page_order); + DRM_DEBUG("total: %d\n", total); + + if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) + return -EINVAL; + if (dev->queue_count) + return -EBUSY; /* Not while in use */ + + spin_lock(&dev->count_lock); + if (dev->buf_use) { + spin_unlock(&dev->count_lock); + return -EBUSY; + } + atomic_inc(&dev->buf_alloc); + spin_unlock(&dev->count_lock); + + mutex_lock(&dev->struct_mutex); + entry = &dma->bufs[order]; + if (entry->buf_count) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; /* May only call once for each order */ + } + + if (count < 0 || count > 4096) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -EINVAL; + } + + entry->buflist = drm_alloc(count * sizeof(*entry->buflist), + DRM_MEM_BUFS); + if (!entry->buflist) { + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + memset(entry->buflist, 0, count * sizeof(*entry->buflist)); + + entry->buf_size = size; + entry->page_order = page_order; + + offset = 0; + + while (entry->buf_count < count) { + buf = &entry->buflist[entry->buf_count]; + buf->idx = dma->buf_count + entry->buf_count; + buf->total = alignment; + buf->order = order; + buf->used = 0; + + buf->offset = (dma->byte_count + offset); + buf->bus_address = dev_priv->gart_vm_start + agp_offset + offset; + buf->address = (void *)(agp_offset + offset); + buf->next = NULL; + buf->waiting = 0; + buf->pending = 0; + init_waitqueue_head(&buf->dma_wait); + buf->file_priv = NULL; + + buf->dev_priv_size = dev->driver->dev_priv_size; + buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + if (!buf->dev_private) { + /* Set count correctly so we free the proper amount. 
*/ + entry->buf_count = count; + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + + memset(buf->dev_private, 0, buf->dev_priv_size); + + DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); + + offset += alignment; + entry->buf_count++; + byte_count += PAGE_SIZE << page_order; + } + + DRM_DEBUG("byte_count: %d\n", byte_count); + + temp_buflist = drm_realloc(dma->buflist, + dma->buf_count * sizeof(*dma->buflist), + (dma->buf_count + entry->buf_count) + * sizeof(*dma->buflist), DRM_MEM_BUFS); + if (!temp_buflist) { + /* Free the entry because it isn't valid */ + drm_cleanup_buf_error(dev, entry); + mutex_unlock(&dev->struct_mutex); + atomic_dec(&dev->buf_alloc); + return -ENOMEM; + } + dma->buflist = temp_buflist; + + for (i = 0; i < entry->buf_count; i++) { + dma->buflist[i + dma->buf_count] = &entry->buflist[i]; + } + + dma->buf_count += entry->buf_count; + dma->seg_count += entry->seg_count; + dma->page_count += byte_count >> PAGE_SHIFT; + dma->byte_count += byte_count; + + DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); + DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); + + mutex_unlock(&dev->struct_mutex); + + dma->flags = _DRM_DMA_USE_SG; + atomic_dec(&dev->buf_alloc); + return 0; +} + +static int radeon_gem_dma_bufs_init(struct drm_device *dev) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + int size = RADEON_DMA_BUFFER_SIZE * RADEON_DMA_BUFFER_COUNT; + int ret; + + ret = drm_dma_setup(dev); + if (ret < 0) + return ret; + + ret = drm_buffer_object_create(dev, size, drm_bo_type_device, + DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT | + DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, 0, + 0, 0, &dev_priv->mm.dma_bufs.bo); + if (ret) { + DRM_ERROR("Failed to create DMA bufs\n"); + return -ENOMEM; + } + + ret = drm_bo_kmap(dev_priv->mm.dma_bufs.bo, 0, size >> PAGE_SHIFT, + &dev_priv->mm.dma_bufs.kmap); + if (ret) { + DRM_ERROR("Failed to mmap DMA buffers\n"); + return -ENOMEM; + } + DRM_DEBUG("\n"); + radeon_gem_addbufs(dev); + + DRM_DEBUG("%x %d\n", dev_priv->mm.dma_bufs.bo->map_list.hash.key, size); + dev->agp_buffer_token = dev_priv->mm.dma_bufs.bo->map_list.hash.key << PAGE_SHIFT; + dev_priv->mm.fake_agp_map.handle = dev_priv->mm.dma_bufs.kmap.virtual; + dev_priv->mm.fake_agp_map.size = size; + + dev->agp_buffer_map = &dev_priv->mm.fake_agp_map; + dev_priv->gart_buffers_offset = dev_priv->mm.dma_bufs.bo->offset + dev_priv->gart_vm_start; + return 0; +} + +static void radeon_gem_dma_bufs_destroy(struct drm_device *dev) +{ + + struct drm_radeon_private *dev_priv = dev->dev_private; + drm_dma_takedown(dev); + + drm_bo_kunmap(&dev_priv->mm.dma_bufs.kmap); + drm_bo_usage_deref_unlocked(&dev_priv->mm.dma_bufs.bo); +} + + +static struct drm_gem_object *gem_object_get(struct drm_device *dev, uint32_t name) +{ + struct drm_gem_object *obj; + + spin_lock(&dev->object_name_lock); + obj = idr_find(&dev->object_name_idr, name); + if (obj) + drm_gem_object_reference(obj); + spin_unlock(&dev->object_name_lock); + return obj; +} + +void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + struct drm_radeon_master_private *master_priv = master->driver_priv; + drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; + struct drm_gem_object *obj; + struct drm_radeon_gem_object *obj_priv; + + /* update front_pitch_offset and back_pitch_offset */ + obj = gem_object_get(dev, sarea_priv->front_handle); + 
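+    /* pitch_offset packs pitch/64 into the upper bits above the 1KB-aligned MC address */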
if (obj) { + obj_priv = obj->driver_private; + + dev_priv->front_offset = obj_priv->bo->offset; + dev_priv->front_pitch_offset = (((sarea_priv->front_pitch / 64) << 22) | + ((obj_priv->bo->offset + + dev_priv->fb_location) >> 10)); + drm_gem_object_unreference(obj); + } + + obj = gem_object_get(dev, sarea_priv->back_handle); + if (obj) { + obj_priv = obj->driver_private; + dev_priv->back_offset = obj_priv->bo->offset; + dev_priv->back_pitch_offset = (((sarea_priv->back_pitch / 64) << 22) | + ((obj_priv->bo->offset + + dev_priv->fb_location) >> 10)); + drm_gem_object_unreference(obj); + } + dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; + +} diff --git a/linux-core/radeon_i2c.c b/linux-core/radeon_i2c.c index eccec650..00fc7c0e 100644 --- a/linux-core/radeon_i2c.c +++ b/linux-core/radeon_i2c.c @@ -27,6 +27,38 @@ #include "radeon_drm.h" #include "radeon_drv.h" +void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) +{ + struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private; + uint32_t temp; + struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec; + + if (lock_state) { + temp = RADEON_READ(rec->a_clk_reg); + temp &= ~(rec->a_clk_mask); + RADEON_WRITE(rec->a_clk_reg, temp); + + temp = RADEON_READ(rec->a_data_reg); + temp &= ~(rec->a_data_mask); + RADEON_WRITE(rec->a_data_reg, temp); + } + + temp = RADEON_READ(rec->mask_clk_reg); + if (lock_state) + temp |= rec->mask_clk_mask; + else + temp &= ~rec->mask_clk_mask; + RADEON_WRITE(rec->mask_clk_reg, temp); + temp = RADEON_READ(rec->mask_clk_reg); + + temp = RADEON_READ(rec->mask_data_reg); + if (lock_state) + temp |= rec->mask_data_mask; + else + temp &= ~rec->mask_data_mask; + RADEON_WRITE(rec->mask_data_reg, temp); + temp = RADEON_READ(rec->mask_data_reg); +} static int get_clock(void *i2c_priv) { @@ -88,7 +120,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); if (i2c == NULL) return NULL; - + i2c->adapter.owner = THIS_MODULE; i2c->adapter.id = I2C_HW_B_RADEON; i2c->adapter.algo_data = &i2c->algo; @@ -113,7 +145,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, out_free: drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); return NULL; - + } void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) diff --git a/linux-core/radeon_legacy_crtc.c b/linux-core/radeon_legacy_crtc.c new file mode 100644 index 00000000..6775ffc0 --- /dev/null +++ b/linux-core/radeon_legacy_crtc.c @@ -0,0 +1,1020 @@ +/* + * Copyright 2007-8 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + */ +#include "drmP.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + +#include "drm_crtc_helper.h" + +void radeon_restore_common_regs(struct drm_device *dev, struct radeon_legacy_state *state) +{ + /* don't need this yet */ +} + +static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + int i = 0; + + /* FIXME: Certain revisions of R300 can't recover here. Not sure of + the cause yet, but this workaround will mask the problem for now. + Other chips will usually pass at the very first test, so the + workaround shouldn't have any effect on them. */ + for (i = 0; + (i < 10000 && + RADEON_READ_PLL(dev_priv, RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R); + i++); +} + +static void radeon_pll_write_update(struct drm_device *dev) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + + while (RADEON_READ_PLL(dev_priv, RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, + RADEON_PPLL_ATOMIC_UPDATE_W, + ~(RADEON_PPLL_ATOMIC_UPDATE_W)); +} + +static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + int i = 0; + + + /* FIXME: Certain revisions of R300 can't recover here. Not sure of + the cause yet, but this workaround will mask the problem for now. + Other chips will usually pass at the very first test, so the + workaround shouldn't have any effect on them. */ + for (i = 0; + (i < 10000 && + RADEON_READ_PLL(dev_priv, RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R); + i++); +} + +static void radeon_pll2_write_update(struct drm_device *dev) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + + while (RADEON_READ_PLL(dev_priv, RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_REF_DIV, + RADEON_P2PLL_ATOMIC_UPDATE_W, + ~(RADEON_P2PLL_ATOMIC_UPDATE_W)); +} + +static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, + uint16_t fb_div) +{ + unsigned int vcoFreq; + + if (!ref_div) + return 1; + + vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div; + + /* + * This is horribly crude: the VCO frequency range is divided into + * 3 parts, each part having a fixed PLL gain value. + */ + if (vcoFreq >= 30000) + /* + * [300..max] MHz : 7 + */ + return 7; + else if (vcoFreq >= 18000) + /* + * [180..300) MHz : 4 + */ + return 4; + else + /* + * [0..180) MHz : 1 + */ + return 1; +} + +void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint32_t mask; + + DRM_DEBUG("\n"); + + mask = radeon_crtc->crtc_id ?
+ (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_VSYNC_DIS | RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_DISP_REQ_EN_B) : + (RADEON_CRTC_DISPLAY_DIS | RADEON_CRTC_VSYNC_DIS | RADEON_CRTC_HSYNC_DIS); + + switch(mode) { + case DRM_MODE_DPMS_ON: + if (radeon_crtc->crtc_id) + RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, 0, ~mask); + else { + RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, 0, ~RADEON_CRTC_DISP_REQ_EN_B); + RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); + } + break; + case DRM_MODE_DPMS_STANDBY: + if (radeon_crtc->crtc_id) + RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_HSYNC_DIS), ~mask); + else { + RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, 0, ~RADEON_CRTC_DISP_REQ_EN_B); + RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, (RADEON_CRTC_DISPLAY_DIS | RADEON_CRTC_HSYNC_DIS), ~mask); + } + break; + case DRM_MODE_DPMS_SUSPEND: + if (radeon_crtc->crtc_id) + RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_VSYNC_DIS), ~mask); + else { + RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, 0, ~RADEON_CRTC_DISP_REQ_EN_B); + RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, (RADEON_CRTC_DISPLAY_DIS | RADEON_CRTC_VSYNC_DIS), ~mask); + } + break; + case DRM_MODE_DPMS_OFF: + if (radeon_crtc->crtc_id) + RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); + else { + RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~RADEON_CRTC_DISP_REQ_EN_B); + RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); + } + break; + } + + if (mode != DRM_MODE_DPMS_OFF) { + radeon_crtc_load_lut(crtc); + } +} + +static bool radeon_set_crtc1_base(struct drm_crtc *crtc, int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_framebuffer *radeon_fb; + struct drm_radeon_gem_object *obj_priv; + uint32_t base; + uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; + uint32_t crtc_pitch; + + DRM_DEBUG("\n"); + + radeon_fb = to_radeon_framebuffer(crtc->fb); + + obj_priv = radeon_fb->obj->driver_private; + + crtc_offset = obj_priv->bo->offset; + + crtc_offset_cntl = 0; + + /* TODO tiling */ + if (0) { + if (radeon_is_r300(dev_priv)) + crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | + R300_CRTC_MICRO_TILE_BUFFER_DIS | + R300_CRTC_MACRO_TILE_EN); + else + crtc_offset_cntl |= RADEON_CRTC_TILE_EN; + } else { + if (radeon_is_r300(dev_priv)) + crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN | + R300_CRTC_MICRO_TILE_BUFFER_DIS | + R300_CRTC_MACRO_TILE_EN); + else + crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; + } + + base = obj_priv->bo->offset; + + /* TODO more tiling */ + if (0) { + if (radeon_is_r300(dev_priv)) { + crtc_tile_x0_y0 = x | (y << 16); + base &= ~0x7ff; + } else { + int byteshift = crtc->fb->bits_per_pixel >> 4; + int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; + base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); + crtc_offset_cntl |= (y % 16); + } + } else { + int offset = y * crtc->fb->pitch + x; + switch (crtc->fb->bits_per_pixel) { + case 15: + case 16: + offset *= 2; + break; + case 24: + offset *= 3; + break; + case 32: + offset *= 4; + break; + default: + return false; + } + base += offset; + } + + base &= ~7; + + /* update sarea TODO */ + + crtc_offset = base; + + crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + + ((crtc->fb->bits_per_pixel * 8) - 1)) / + (crtc->fb->bits_per_pixel * 8)); + crtc_pitch |= crtc_pitch << 16; + + DRM_DEBUG("mc_fb_location: 0x%x\n", dev_priv->fb_location); + + RADEON_WRITE(RADEON_DISPLAY_BASE_ADDR, dev_priv->fb_location); + + if 
(radeon_is_r300(dev_priv)) + RADEON_WRITE(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0); + RADEON_WRITE(RADEON_CRTC_OFFSET_CNTL, crtc_offset_cntl); + RADEON_WRITE(RADEON_CRTC_OFFSET, crtc_offset); + RADEON_WRITE(RADEON_CRTC_PITCH, crtc_pitch); + + return true; +} + +static bool radeon_set_crtc1_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + int format; + int hsync_start; + int hsync_wid; + int vsync_wid; + uint32_t crtc_gen_cntl; + uint32_t crtc_ext_cntl; + uint32_t crtc_h_total_disp; + uint32_t crtc_h_sync_strt_wid; + uint32_t crtc_v_total_disp; + uint32_t crtc_v_sync_strt_wid; + uint32_t disp_merge_cntl; + + DRM_DEBUG("\n"); + + switch (crtc->fb->bits_per_pixel) { + + case 15: /* 555 */ + format = 3; + break; + case 16: /* 565 */ + format = 4; + break; + case 24: /* RGB */ + format = 5; + break; + case 32: /* xRGB */ + format = 6; + break; + default: + return false; + } + + crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN + | RADEON_CRTC_EN + | (format << 8) + | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) + ? RADEON_CRTC_DBL_SCAN_EN + : 0) + | ((mode->flags & DRM_MODE_FLAG_CSYNC) + ? RADEON_CRTC_CSYNC_EN + : 0) + | ((mode->flags & DRM_MODE_FLAG_INTERLACE) + ? RADEON_CRTC_INTERLACE_EN + : 0)); + + crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); + crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | + RADEON_CRTC_VSYNC_DIS | + RADEON_CRTC_HSYNC_DIS | + RADEON_CRTC_DISPLAY_DIS); + + disp_merge_cntl = RADEON_READ(RADEON_DISP_MERGE_CNTL); + disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; + + crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + hsync_start = mode->crtc_hsync_start - 8; + + crtc_h_sync_strt_wid = ((hsync_start & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + /* This works for double scan mode. */ + crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC_V_SYNC_POL + : 0)); + + /* TODO -> Dell Server */ + if (0) { + uint32_t disp_hw_debug = RADEON_READ(RADEON_DISP_HW_DEBUG); + uint32_t tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); + uint32_t dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2); + uint32_t crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); + + dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; + dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; + + /* For CRT on DAC2, don't turn it on if BIOS didn't + enable it, even if it's detected. 
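+	   (Note: this whole DAC2 path is still compiled out by the
+	   if (0) above until Dell server detection is wired up.)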
+ */ + disp_hw_debug |= RADEON_CRT2_DISP1_SEL; + tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16)); + tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16)); + + RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); + RADEON_WRITE(RADEON_DISP_HW_DEBUG, disp_hw_debug); + RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); + RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); + } + + RADEON_WRITE(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl | + RADEON_CRTC_DISP_REQ_EN_B); + + RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, + RADEON_CRTC_VSYNC_DIS | RADEON_CRTC_HSYNC_DIS | RADEON_CRTC_DISPLAY_DIS); + + RADEON_WRITE(RADEON_CRTC_H_TOTAL_DISP, crtc_h_total_disp); + RADEON_WRITE(RADEON_CRTC_H_SYNC_STRT_WID, crtc_h_sync_strt_wid); + RADEON_WRITE(RADEON_CRTC_V_TOTAL_DISP, crtc_v_total_disp); + RADEON_WRITE(RADEON_CRTC_V_SYNC_STRT_WID, crtc_v_sync_strt_wid); + + RADEON_WRITE(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); + + RADEON_WRITE(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); + + return true; +} + +static void radeon_set_pll1(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct drm_encoder *encoder; + uint32_t feedback_div = 0; + uint32_t reference_div = 0; + uint32_t post_divider = 0; + uint32_t freq = 0; + uint8_t pll_gain; + int pll_flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV; + bool use_bios_divs = false; + /* PLL registers */ + uint32_t ppll_ref_div = 0; + uint32_t ppll_div_3 = 0; + uint32_t htotal_cntl = 0; + uint32_t vclk_ecp_cntl; + + struct radeon_pll *pll = &dev_priv->mode_info.pll; + + struct { + int divider; + int bitvalue; + } *post_div, post_divs[] = { + /* From RAGE 128 VR/RAGE 128 GL Register + * Reference Manual (Technical Reference + * Manual P/N RRG-G04100-C Rev. 0.04), page + * 3-17 (PLL_DIV_[3:0]). + */ + { 1, 0 }, /* VCLK_SRC */ + { 2, 1 }, /* VCLK_SRC/2 */ + { 4, 2 }, /* VCLK_SRC/4 */ + { 8, 3 }, /* VCLK_SRC/8 */ + { 3, 4 }, /* VCLK_SRC/3 */ + { 16, 5 }, /* VCLK_SRC/16 */ + { 6, 6 }, /* VCLK_SRC/6 */ + { 12, 7 }, /* VCLK_SRC/12 */ + { 0, 0 } + }; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) + pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; + if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->use_bios_dividers) { + ppll_ref_div = radeon_encoder->panel_ref_divider; + ppll_div_3 = (radeon_encoder->panel_fb_divider | + (radeon_encoder->panel_post_divider << 16)); + htotal_cntl = 0; + use_bios_divs = true; + } else + pll_flags |= RADEON_PLL_USE_REF_DIV; + } + } + } + + DRM_DEBUG("\n"); + + if (!use_bios_divs) { + radeon_compute_pll(pll, mode->clock, &freq, &feedback_div, &reference_div, &post_divider, pll_flags); + + for (post_div = &post_divs[0]; post_div->divider; ++post_div) { + if (post_div->divider == post_divider) + break; + } + + if (!post_div->divider) { + post_div = &post_divs[0]; + } + + DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n", + (unsigned)freq, + feedback_div, + reference_div, + post_divider); + + ppll_ref_div = reference_div; +#if defined(__powerpc__) && (0) /* TODO */ + /* apparently programming this otherwise causes a hang??? 
*/ + if (info->MacModel == RADEON_MAC_IBOOK) + state->ppll_div_3 = 0x000600ad; + else +#endif + ppll_div_3 = (feedback_div | (post_div->bitvalue << 16)); + htotal_cntl = mode->htotal & 0x7; + + } + + vclk_ecp_cntl = (RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL) & + ~RADEON_VCLK_SRC_SEL_MASK) | RADEON_VCLK_SRC_SEL_PPLLCLK; + + pll_gain = radeon_compute_pll_gain(dev_priv->mode_info.pll.reference_freq, + ppll_ref_div & RADEON_PPLL_REF_DIV_MASK, + ppll_div_3 & RADEON_PPLL_FB3_DIV_MASK); + + if (dev_priv->flags & RADEON_IS_MOBILITY) { + /* A temporary workaround for the occasional blanking on certain laptop panels. + This appears to be related to the PLL divider registers (failing to lock?). + It occurs even when all dividers are the same as their old settings. + In this case we really don't need to fiddle with PLL registers. + By doing this we can avoid the blanking problem with some panels. + */ + if ((ppll_ref_div == (RADEON_READ_PLL(dev_priv, RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) && + (ppll_div_3 == (RADEON_READ_PLL(dev_priv, RADEON_PPLL_DIV_3) & + (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) { + RADEON_WRITE_P(RADEON_CLOCK_CNTL_INDEX, + RADEON_PLL_DIV_SEL, + ~(RADEON_PLL_DIV_SEL)); + radeon_pll_errata_after_index(dev_priv); + return; + } + } + + RADEON_WRITE_PLL_P(dev_priv, RADEON_VCLK_ECP_CNTL, + RADEON_VCLK_SRC_SEL_CPUCLK, + ~(RADEON_VCLK_SRC_SEL_MASK)); + RADEON_WRITE_PLL_P(dev_priv, + RADEON_PPLL_CNTL, + RADEON_PPLL_RESET + | RADEON_PPLL_ATOMIC_UPDATE_EN + | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN + | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT), + ~(RADEON_PPLL_RESET + | RADEON_PPLL_ATOMIC_UPDATE_EN + | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN + | RADEON_PPLL_PVG_MASK)); + + RADEON_WRITE_P(RADEON_CLOCK_CNTL_INDEX, + RADEON_PLL_DIV_SEL, + ~(RADEON_PLL_DIV_SEL)); + radeon_pll_errata_after_index(dev_priv); + + if (radeon_is_r300(dev_priv) || + (dev_priv->chip_family == CHIP_RS300) || + (dev_priv->chip_family == CHIP_RS400) || + (dev_priv->chip_family == CHIP_RS480)) { + if (ppll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) { + /* When restoring console mode, use saved PPLL_REF_DIV + * setting.
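+			 * The saved value already has the divider in the
+			 * ref_div_acc field, which is why it is written back
+			 * unshifted below, while the else branch shifts a
+			 * freshly computed divider into place.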
+ */ + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, + ppll_ref_div, + 0); + } else { + /* R300 uses ref_div_acc field as real ref divider */ + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, + (ppll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT), + ~R300_PPLL_REF_DIV_ACC_MASK); + } + } else { + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, + ppll_ref_div, + ~RADEON_PPLL_REF_DIV_MASK); + } + + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_DIV_3, + ppll_div_3, + ~RADEON_PPLL_FB3_DIV_MASK); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_DIV_3, + ppll_div_3, + ~RADEON_PPLL_POST3_DIV_MASK); + + radeon_pll_write_update(dev); + radeon_pll_wait_for_read_update_complete(dev); + + RADEON_WRITE_PLL(dev_priv, RADEON_HTOTAL_CNTL, htotal_cntl); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_CNTL, + 0, + ~(RADEON_PPLL_RESET + | RADEON_PPLL_SLEEP + | RADEON_PPLL_ATOMIC_UPDATE_EN + | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN)); + + DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n", + ppll_ref_div, + ppll_div_3, + (unsigned)htotal_cntl, + RADEON_READ_PLL(dev_priv, RADEON_PPLL_CNTL)); + DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n", + ppll_ref_div & RADEON_PPLL_REF_DIV_MASK, + ppll_div_3 & RADEON_PPLL_FB3_DIV_MASK, + (ppll_div_3 & RADEON_PPLL_POST3_DIV_MASK) >> 16); + + mdelay(50); /* Let the clock lock */ + + RADEON_WRITE_PLL_P(dev_priv, RADEON_VCLK_ECP_CNTL, + RADEON_VCLK_SRC_SEL_PPLLCLK, + ~(RADEON_VCLK_SRC_SEL_MASK)); + + /*RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);*/ + +} + +static bool radeon_set_crtc2_base(struct drm_crtc *crtc, int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_framebuffer *radeon_fb; + struct drm_radeon_gem_object *obj_priv; + uint32_t base; + uint32_t crtc2_offset, crtc2_offset_cntl, crtc2_tile_x0_y0 = 0; + uint32_t crtc2_pitch; + + DRM_DEBUG("\n"); + + radeon_fb = to_radeon_framebuffer(crtc->fb); + + obj_priv = radeon_fb->obj->driver_private; + + crtc2_offset = obj_priv->bo->offset; + + crtc2_offset_cntl = 0; + + /* TODO tiling */ + if (0) { + if (radeon_is_r300(dev_priv)) + crtc2_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | + R300_CRTC_MICRO_TILE_BUFFER_DIS | + R300_CRTC_MACRO_TILE_EN); + else + crtc2_offset_cntl |= RADEON_CRTC_TILE_EN; + } else { + if (radeon_is_r300(dev_priv)) + crtc2_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN | + R300_CRTC_MICRO_TILE_BUFFER_DIS | + R300_CRTC_MACRO_TILE_EN); + else + crtc2_offset_cntl &= ~RADEON_CRTC_TILE_EN; + } + + base = obj_priv->bo->offset; + + /* TODO more tiling */ + if (0) { + if (radeon_is_r300(dev_priv)) { + crtc2_tile_x0_y0 = x | (y << 16); + base &= ~0x7ff; + } else { + int byteshift = crtc->fb->bits_per_pixel >> 4; + int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; + base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); + crtc2_offset_cntl |= (y % 16); + } + } else { + int offset = y * crtc->fb->pitch + x; + switch (crtc->fb->bits_per_pixel) { + case 15: + case 16: + offset *= 2; + break; + case 24: + offset *= 3; + break; + case 32: + offset *= 4; + break; + default: + return false; + } + base += offset; + } + + base &= ~7; + + /* update sarea TODO */ + + crtc2_offset = base; + + crtc2_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + + ((crtc->fb->bits_per_pixel * 8) - 1)) / + (crtc->fb->bits_per_pixel * 8)); + crtc2_pitch |= crtc2_pitch << 16; + + RADEON_WRITE(RADEON_DISPLAY2_BASE_ADDR, dev_priv->fb_location); + + if (radeon_is_r300(dev_priv)) + 
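/* only R300 and newer have a CRTC2 tile origin register;
+		 * pre-R300 chips fold the tile origin into the offset above */
+		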
RADEON_WRITE(R300_CRTC2_TILE_X0_Y0, crtc2_tile_x0_y0); + RADEON_WRITE(RADEON_CRTC2_OFFSET_CNTL, crtc2_offset_cntl); + RADEON_WRITE(RADEON_CRTC2_OFFSET, crtc2_offset); + RADEON_WRITE(RADEON_CRTC2_PITCH, crtc2_pitch); + + return true; +} + +static bool radeon_set_crtc2_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + int format; + int hsync_start; + int hsync_wid; + int vsync_wid; + uint32_t crtc2_gen_cntl; + uint32_t crtc2_h_total_disp; + uint32_t crtc2_h_sync_strt_wid; + uint32_t crtc2_v_total_disp; + uint32_t crtc2_v_sync_strt_wid; + uint32_t disp2_merge_cntl; + uint32_t fp_h2_sync_strt_wid; + uint32_t fp_v2_sync_strt_wid; + + DRM_DEBUG("\n"); + + switch (crtc->fb->bits_per_pixel) { + + case 15: /* 555 */ + format = 3; + break; + case 16: /* 565 */ + format = 4; + break; + case 24: /* RGB */ + format = 5; + break; + case 32: /* xRGB */ + format = 6; + break; + default: + return false; + } + + crtc2_h_total_disp = + ((((mode->crtc_htotal / 8) - 1) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + hsync_start = mode->crtc_hsync_start - 8; + + crtc2_h_sync_strt_wid = ((hsync_start & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + /* This works for double scan mode. */ + crtc2_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + crtc2_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC2_V_SYNC_POL + : 0)); + + /* check to see if TV DAC is enabled for another crtc and keep it enabled */ + if (RADEON_READ(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) + crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON; + else + crtc2_gen_cntl = 0; + + crtc2_gen_cntl |= (RADEON_CRTC2_EN + | (format << 8) + | RADEON_CRTC2_VSYNC_DIS + | RADEON_CRTC2_HSYNC_DIS + | RADEON_CRTC2_DISP_DIS + | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) + ? RADEON_CRTC2_DBL_SCAN_EN + : 0) + | ((mode->flags & DRM_MODE_FLAG_CSYNC) + ? RADEON_CRTC2_CSYNC_EN + : 0) + | ((mode->flags & DRM_MODE_FLAG_INTERLACE) + ? 
RADEON_CRTC2_INTERLACE_EN + : 0)); + + disp2_merge_cntl = RADEON_READ(RADEON_DISP2_MERGE_CNTL); + disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; + + fp_h2_sync_strt_wid = crtc2_h_sync_strt_wid; + fp_v2_sync_strt_wid = crtc2_v_sync_strt_wid; + + RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, + crtc2_gen_cntl | RADEON_CRTC2_VSYNC_DIS | + RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_DISP_DIS | + RADEON_CRTC2_DISP_REQ_EN_B); + + RADEON_WRITE(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp); + RADEON_WRITE(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid); + RADEON_WRITE(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp); + RADEON_WRITE(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid); + + RADEON_WRITE(RADEON_FP_H2_SYNC_STRT_WID, fp_h2_sync_strt_wid); + RADEON_WRITE(RADEON_FP_V2_SYNC_STRT_WID, fp_v2_sync_strt_wid); + + RADEON_WRITE(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl); + + RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); + + return true; + +} + +static void radeon_set_pll2(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct drm_encoder *encoder; + uint32_t feedback_div = 0; + uint32_t reference_div = 0; + uint32_t post_divider = 0; + uint32_t freq = 0; + uint8_t pll_gain; + int pll_flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV; + bool use_bios_divs = false; + /* PLL2 registers */ + uint32_t p2pll_ref_div = 0; + uint32_t p2pll_div_0 = 0; + uint32_t htotal_cntl2 = 0; + uint32_t pixclks_cntl; + + struct radeon_pll *pll = &dev_priv->mode_info.pll; + + struct { + int divider; + int bitvalue; + } *post_div, post_divs[] = { + /* From RAGE 128 VR/RAGE 128 GL Register + * Reference Manual (Technical Reference + * Manual P/N RRG-G04100-C Rev. 0.04), page + * 3-17 (PLL_DIV_[3:0]). 
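+	 * Note that, unlike the PPLL table in radeon_set_pll1() above,
+	 * this table carries no /16 entry (bit value 5); P2PLL apparently
+	 * does not implement that post divider.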
+ */ + { 1, 0 }, /* VCLK_SRC */ + { 2, 1 }, /* VCLK_SRC/2 */ + { 4, 2 }, /* VCLK_SRC/4 */ + { 8, 3 }, /* VCLK_SRC/8 */ + { 3, 4 }, /* VCLK_SRC/3 */ + { 6, 6 }, /* VCLK_SRC/6 */ + { 12, 7 }, /* VCLK_SRC/12 */ + { 0, 0 } + }; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) + pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; + if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->use_bios_dividers) { + p2pll_ref_div = radeon_encoder->panel_ref_divider; + p2pll_div_0 = (radeon_encoder->panel_fb_divider | + (radeon_encoder->panel_post_divider << 16)); + htotal_cntl2 = 0; + use_bios_divs = true; + } else + pll_flags |= RADEON_PLL_USE_REF_DIV; + } + } + } + + DRM_DEBUG("\n"); + + if (!use_bios_divs) { + radeon_compute_pll(pll, mode->clock, &freq, &feedback_div, &reference_div, &post_divider, pll_flags); + + for (post_div = &post_divs[0]; post_div->divider; ++post_div) { + if (post_div->divider == post_divider) + break; + } + + if (!post_div->divider) { + post_div = &post_divs[0]; + } + + DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n", + (unsigned)freq, + feedback_div, + reference_div, + post_divider); + + p2pll_ref_div = reference_div; + p2pll_div_0 = (feedback_div | (post_div->bitvalue << 16)); + htotal_cntl2 = mode->htotal & 0x7; + + } + + pixclks_cntl = ((RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL) & + ~(RADEON_PIX2CLK_SRC_SEL_MASK)) | + RADEON_PIX2CLK_SRC_SEL_P2PLLCLK); + + pll_gain = radeon_compute_pll_gain(dev_priv->mode_info.pll.reference_freq, + p2pll_ref_div & RADEON_P2PLL_REF_DIV_MASK, + p2pll_div_0 & RADEON_P2PLL_FB0_DIV_MASK); + + + RADEON_WRITE_PLL_P(dev_priv, RADEON_PIXCLKS_CNTL, + RADEON_PIX2CLK_SRC_SEL_CPUCLK, + ~(RADEON_PIX2CLK_SRC_SEL_MASK)); + + RADEON_WRITE_PLL_P(dev_priv, + RADEON_P2PLL_CNTL, + RADEON_P2PLL_RESET + | RADEON_P2PLL_ATOMIC_UPDATE_EN + | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT), + ~(RADEON_P2PLL_RESET + | RADEON_P2PLL_ATOMIC_UPDATE_EN + | RADEON_P2PLL_PVG_MASK)); + + + RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_REF_DIV, + p2pll_ref_div, + ~RADEON_P2PLL_REF_DIV_MASK); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_DIV_0, + p2pll_div_0, + ~RADEON_P2PLL_FB0_DIV_MASK); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_DIV_0, + p2pll_div_0, + ~RADEON_P2PLL_POST0_DIV_MASK); + + radeon_pll2_write_update(dev); + radeon_pll2_wait_for_read_update_complete(dev); + + RADEON_WRITE_PLL(dev_priv, RADEON_HTOTAL2_CNTL, htotal_cntl2); + + RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_CNTL, + 0, + ~(RADEON_P2PLL_RESET + | RADEON_P2PLL_SLEEP + | RADEON_P2PLL_ATOMIC_UPDATE_EN)); + + DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n", + (unsigned)p2pll_ref_div, + (unsigned)p2pll_div_0, + (unsigned)htotal_cntl2, + RADEON_READ_PLL(dev_priv, RADEON_P2PLL_CNTL)); + DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n", + (unsigned)p2pll_ref_div & RADEON_P2PLL_REF_DIV_MASK, + (unsigned)p2pll_div_0 & RADEON_P2PLL_FB0_DIV_MASK, + (unsigned)((p2pll_div_0 & + RADEON_P2PLL_POST0_DIV_MASK) >>16)); + + mdelay(50); /* Let the clock lock */ + + RADEON_WRITE_PLL_P(dev_priv, RADEON_PIXCLKS_CNTL, + RADEON_PIX2CLK_SRC_SEL_P2PLLCLK, + ~(RADEON_PIX2CLK_SRC_SEL_MASK)); + + RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, pixclks_cntl); + +} + +static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +void radeon_crtc_set_base(struct
drm_crtc *crtc, int x, int y) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + switch(radeon_crtc->crtc_id) { + case 0: + radeon_set_crtc1_base(crtc, x, y); + break; + case 1: + radeon_set_crtc2_base(crtc, x, y); + break; + + } +} + +static void radeon_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + DRM_DEBUG("\n"); + + /* TODO TV */ + + radeon_crtc_set_base(crtc, x, y); + + switch(radeon_crtc->crtc_id) { + case 0: + radeon_set_crtc1_timing(crtc, adjusted_mode); + radeon_set_pll1(crtc, adjusted_mode); + break; + case 1: + radeon_set_crtc2_timing(crtc, adjusted_mode); + radeon_set_pll2(crtc, adjusted_mode); + break; + + } +} + +static void radeon_crtc_prepare(struct drm_crtc *crtc) +{ + radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void radeon_crtc_commit(struct drm_crtc *crtc) +{ + radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static const struct drm_crtc_helper_funcs legacy_helper_funcs = { + .dpms = radeon_crtc_dpms, + .mode_fixup = radeon_crtc_mode_fixup, + .mode_set = radeon_crtc_mode_set, + .mode_set_base = radeon_crtc_set_base, + .prepare = radeon_crtc_prepare, + .commit = radeon_crtc_commit, +}; + + +void radeon_legacy_init_crtc(struct drm_device *dev, + struct radeon_crtc *radeon_crtc) +{ + drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); +} diff --git a/linux-core/radeon_legacy_encoders.c b/linux-core/radeon_legacy_encoders.c new file mode 100644 index 00000000..172f93de --- /dev/null +++ b/linux-core/radeon_legacy_encoders.c @@ -0,0 +1,1037 @@ +/* + * Copyright 2007-8 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + */ +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + + +static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + int xres = mode->hdisplay; + int yres = mode->vdisplay; + bool hscale = true, vscale = true; + int hsync_wid; + int vsync_wid; + int hsync_start; + uint32_t scale, inc; + uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; + uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; + + DRM_DEBUG("\n"); + + fp_vert_stretch = RADEON_READ(RADEON_FP_VERT_STRETCH) & + (RADEON_VERT_STRETCH_RESERVED | + RADEON_VERT_AUTO_RATIO_INC); + fp_horz_stretch = RADEON_READ(RADEON_FP_HORZ_STRETCH) & + (RADEON_HORZ_FP_LOOP_STRETCH | + RADEON_HORZ_AUTO_RATIO_INC); + + crtc_more_cntl = 0; + if ((dev_priv->chip_family == CHIP_RS100) || + (dev_priv->chip_family == CHIP_RS200)) { + /* This is to work around an ASIC bug for RMX; some versions + of the BIOS don't have this register initialized correctly. */ + crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; + } + + + fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + hsync_start = mode->crtc_hsync_start - 8; + + fp_h_sync_strt_wid = ((hsync_start & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC_V_SYNC_POL + : 0)); + + fp_horz_vert_active = 0; + + if (radeon_encoder->panel_xres == 0 || + radeon_encoder->panel_yres == 0) { + hscale = false; + vscale = false; + } else { + if (xres > radeon_encoder->panel_xres) + xres = radeon_encoder->panel_xres; + if (yres > radeon_encoder->panel_yres) + yres = radeon_encoder->panel_yres; + + if (xres == radeon_encoder->panel_xres) + hscale = false; + if (yres == radeon_encoder->panel_yres) + vscale = false; + } + + if (radeon_encoder->flags & RADEON_USE_RMX) { + if (radeon_encoder->rmx_type != RMX_CENTER) { + if (!hscale) + fp_horz_stretch |= ((xres/8-1) << 16); + else { + inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; + scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) + / radeon_encoder->panel_xres + 1; + fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | + RADEON_HORZ_STRETCH_BLEND | + RADEON_HORZ_STRETCH_ENABLE | + ((radeon_encoder->panel_xres/8-1) << 16)); + } + + if (!vscale) + fp_vert_stretch |= ((yres-1) << 12); + else { + inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ?
1 : 0; + scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) + / radeon_encoder->panel_yres + 1; + fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | + RADEON_VERT_STRETCH_ENABLE | + RADEON_VERT_STRETCH_BLEND | + ((radeon_encoder->panel_yres-1) << 12)); + } + } else if (radeon_encoder->rmx_type == RMX_CENTER) { + int blank_width; + + fp_horz_stretch |= ((xres/8-1) << 16); + fp_vert_stretch |= ((yres-1) << 12); + + crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | + RADEON_CRTC_AUTO_VERT_CENTER_EN); + + blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; + if (blank_width > 110) + blank_width = 110; + + fp_crtc_h_total_disp = (((blank_width) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + + fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC_V_SYNC_POL + : 0))); + + fp_horz_vert_active = (((radeon_encoder->panel_yres) & 0xfff) | + (((radeon_encoder->panel_xres / 8) & 0x1ff) << 16)); + } + } else { + fp_horz_stretch |= ((xres/8-1) << 16); + fp_vert_stretch |= ((yres-1) << 12); + } + + RADEON_WRITE(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); + RADEON_WRITE(RADEON_FP_VERT_STRETCH, fp_vert_stretch); + RADEON_WRITE(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); + RADEON_WRITE(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); + RADEON_WRITE(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); + RADEON_WRITE(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); + RADEON_WRITE(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); + RADEON_WRITE(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); + +} + +static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; + + DRM_DEBUG("\n"); + + switch (mode) { + case DRM_MODE_DPMS_ON: + disp_pwr_man = RADEON_READ(RADEON_DISP_PWR_MAN); + disp_pwr_man |= RADEON_AUTO_PWRUP_EN; + RADEON_WRITE(RADEON_DISP_PWR_MAN, disp_pwr_man); + lvds_pll_cntl = RADEON_READ(RADEON_LVDS_PLL_CNTL); + lvds_pll_cntl |= RADEON_LVDS_PLL_EN; + RADEON_WRITE(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); + udelay(1000); + lvds_pll_cntl = RADEON_READ(RADEON_LVDS_PLL_CNTL); + lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; + RADEON_WRITE(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); + + /* enable lvds, turn on voltage */ + lvds_gen_cntl = RADEON_READ(RADEON_LVDS_GEN_CNTL); + lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); + RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + udelay(radeon_encoder->panel_digon_delay * 1000); + + /* enable data */ + lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS); + RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + udelay(radeon_encoder->panel_blon_delay * 1000); + + /* enable backlight */ + lvds_gen_cntl |= RADEON_LVDS_BLON; + 
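/* power-up order matters above: PLL first, then DIGON with the
+		 * panel's digon delay, then data unblank with the blon delay,
+		 * and only then the backlight */
+		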
RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + pixclks_cntl = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); + RADEON_WRITE_PLL_P(dev_priv, RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); + lvds_gen_cntl = RADEON_READ(RADEON_LVDS_GEN_CNTL); + lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; + lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); + RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, pixclks_cntl); + break; + } +} + +static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) +{ + radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) +{ + radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t lvds_pll_cntl, lvds_gen_cntl; + + DRM_DEBUG("\n"); + + if (radeon_crtc->crtc_id == 0) + radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); + + lvds_pll_cntl = RADEON_READ(RADEON_LVDS_PLL_CNTL); + lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; + if (radeon_encoder->lvds_gen_cntl) + lvds_gen_cntl = radeon_encoder->lvds_gen_cntl; + else + lvds_gen_cntl = RADEON_READ(RADEON_LVDS_GEN_CNTL); + lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; + lvds_gen_cntl &= ~(RADEON_LVDS_ON | + RADEON_LVDS_BLON | + RADEON_LVDS_EN | + RADEON_LVDS_RST_FM); + + if (radeon_is_r300(dev_priv)) + lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK); + + if (radeon_crtc->crtc_id == 0) { + if (radeon_is_r300(dev_priv)) { + if (radeon_encoder->flags & RADEON_USE_RMX) + lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; + } else + lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; + } else { + if (radeon_is_r300(dev_priv)) { + lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2; + } else + lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2; + } + + RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + RADEON_WRITE(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); + + if (dev_priv->chip_family == CHIP_RV410) + RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, 0); +} + +static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + + radeon_encoder->flags &= ~RADEON_USE_RMX; + + if (radeon_encoder->rmx_type != RMX_OFF) + radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); + + return true; +} + +static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { + .dpms = radeon_legacy_lvds_dpms, + .mode_fixup = radeon_legacy_lvds_mode_fixup, + .prepare = radeon_legacy_lvds_prepare, + .mode_set = radeon_legacy_lvds_mode_set, + .commit = radeon_legacy_lvds_commit, +}; + + +static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { + .destroy = radeon_enc_destroy, +}; + + +struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index) +{ + struct radeon_encoder *radeon_encoder; + struct drm_encoder *encoder; + + DRM_DEBUG("\n"); + + radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) { + return NULL; + } + 
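+	/* standard bring-up: the drm_encoder is embedded in radeon_encoder,
+	 * registered with its funcs and helper vtable below; possible_crtcs
+	 * of 0x3 lets either CRTC drive this encoder */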
+ encoder = &radeon_encoder->base; + + encoder->possible_crtcs = 0x3; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS); + + drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); + + /* get the lvds info from the bios */ + radeon_combios_get_lvds_info(radeon_encoder); + + /* LVDS gets default RMX full scaling */ + radeon_encoder->rmx_type = RMX_FULL; + + return encoder; +} + +static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint32_t crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); + uint32_t dac_cntl = RADEON_READ(RADEON_DAC_CNTL); + uint32_t dac_macro_cntl = RADEON_READ(RADEON_DAC_MACRO_CNTL); + + DRM_DEBUG("\n"); + + switch(mode) { + case DRM_MODE_DPMS_ON: + crtc_ext_cntl |= RADEON_CRTC_CRT_ON; + dac_cntl &= ~RADEON_DAC_PDWN; + dac_macro_cntl &= ~(RADEON_DAC_PDWN_R | + RADEON_DAC_PDWN_G | + RADEON_DAC_PDWN_B); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON; + dac_cntl |= RADEON_DAC_PDWN; + dac_macro_cntl |= (RADEON_DAC_PDWN_R | + RADEON_DAC_PDWN_G | + RADEON_DAC_PDWN_B); + break; + } + + RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); + RADEON_WRITE(RADEON_DAC_CNTL, dac_cntl); + RADEON_WRITE(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); + +} + +static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) +{ + radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) +{ + radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl; + + DRM_DEBUG("\n"); + + if (radeon_crtc->crtc_id == 0) + radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); + + if (radeon_crtc->crtc_id == 0) { + if (dev_priv->chip_family == CHIP_R200 || radeon_is_r300(dev_priv)) { + disp_output_cntl = RADEON_READ(RADEON_DISP_OUTPUT_CNTL) & + ~(RADEON_DISP_DAC_SOURCE_MASK); + RADEON_WRITE(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); + } else { + dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL); + RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); + } + } else { + if (dev_priv->chip_family == CHIP_R200 || radeon_is_r300(dev_priv)) { + disp_output_cntl = RADEON_READ(RADEON_DISP_OUTPUT_CNTL) & + ~(RADEON_DISP_DAC_SOURCE_MASK); + disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2; + RADEON_WRITE(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); + } else { + dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL; + RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); + } + } + + dac_cntl = (RADEON_DAC_MASK_ALL | + RADEON_DAC_VGA_ADR_EN | + /* TODO 6-bits */ + RADEON_DAC_8BIT_EN); + + RADEON_WRITE_P(RADEON_DAC_CNTL, + dac_cntl, + RADEON_DAC_RANGE_CNTL | + RADEON_DAC_BLANKING); + + dac_macro_cntl = RADEON_READ(RADEON_DAC_MACRO_CNTL); + 
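/* written back exactly as read; presumably this just re-latches the
+	 * power-on value until proper DAC power handling is filled in */
+	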
RADEON_WRITE(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); +} + +static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + // FIXME + return connector_status_disconnected; + +} + +static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { + .dpms = radeon_legacy_primary_dac_dpms, + .mode_fixup = radeon_legacy_primary_dac_mode_fixup, + .prepare = radeon_legacy_primary_dac_prepare, + .mode_set = radeon_legacy_primary_dac_mode_set, + .commit = radeon_legacy_primary_dac_commit, + .detect = radeon_legacy_primary_dac_detect, +}; + + +static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { + .destroy = radeon_enc_destroy, +}; + +struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int has_tv) +{ + struct radeon_encoder *radeon_encoder; + struct drm_encoder *encoder; + + DRM_DEBUG("\n"); + + radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) { + return NULL; + } + + encoder = &radeon_encoder->base; + + encoder->possible_crtcs = 0x3; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, + DRM_MODE_ENCODER_DAC); + + drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs); + + /* TODO get the primary dac vals from bios tables */ + //radeon_combios_get_lvds_info(radeon_encoder); + + return encoder; +} + + +static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint32_t fp_gen_cntl = RADEON_READ(RADEON_FP_GEN_CNTL); + + DRM_DEBUG("\n"); + + switch(mode) { + case DRM_MODE_DPMS_ON: + fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); + break; + } + + RADEON_WRITE(RADEON_FP_GEN_CNTL, fp_gen_cntl); +} + +static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) +{ + radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) +{ + radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl; + int i; + + DRM_DEBUG("\n"); + + if (radeon_crtc->crtc_id == 0) + radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); + + tmp = tmds_pll_cntl = RADEON_READ(RADEON_TMDS_PLL_CNTL); + tmp &= 0xfffff; + if (dev_priv->chip_family == CHIP_RV280) { + /* bit 22 of TMDS_PLL_CNTL is read-back inverted */ + tmp ^= (1 << 22); + tmds_pll_cntl ^= (1 << 22); + } + + for (i = 0; i < 4; i++) { + if (radeon_encoder->tmds_pll[i].freq == 0) + break; + if ((uint32_t)(mode->clock / 10) < radeon_encoder->tmds_pll[i].freq) { + tmp = radeon_encoder->tmds_pll[i].value ; + break; + } + } + + if 
(radeon_is_r300(dev_priv) || (dev_priv->chip_family == CHIP_RV280)) { + if (tmp & 0xfff00000) + tmds_pll_cntl = tmp; + else { + tmds_pll_cntl &= 0xfff00000; + tmds_pll_cntl |= tmp; + } + } else + tmds_pll_cntl = tmp; + + tmds_transmitter_cntl = RADEON_READ(RADEON_TMDS_TRANSMITTER_CNTL) & + ~(RADEON_TMDS_TRANSMITTER_PLLRST); + + if (dev_priv->chip_family == CHIP_R200 || + dev_priv->chip_family == CHIP_R100 || + radeon_is_r300(dev_priv)) + tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN); + else /* RV chips got this bit reversed */ + tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; + + fp_gen_cntl = (RADEON_READ(RADEON_FP_GEN_CNTL) | + (RADEON_FP_CRTC_DONT_SHADOW_VPAR | + RADEON_FP_CRTC_DONT_SHADOW_HEND)); + + fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); + + if (1) // FIXME rgbBits == 8 + fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ + else + fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ + + if (radeon_crtc->crtc_id == 0) { + if (radeon_is_r300(dev_priv) || dev_priv->chip_family == CHIP_R200) { + fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; + if (radeon_encoder->flags & RADEON_USE_RMX) + fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; + else + fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; + } else + fp_gen_cntl |= RADEON_FP_SEL_CRTC1; + } else { + if (radeon_is_r300(dev_priv) || dev_priv->chip_family == CHIP_R200) { + fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; + fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; + } else + fp_gen_cntl |= RADEON_FP_SEL_CRTC2; + } + + RADEON_WRITE(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); + RADEON_WRITE(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); + RADEON_WRITE(RADEON_FP_GEN_CNTL, fp_gen_cntl); +} + +static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { + .dpms = radeon_legacy_tmds_int_dpms, + .mode_fixup = radeon_legacy_tmds_int_mode_fixup, + .prepare = radeon_legacy_tmds_int_prepare, + .mode_set = radeon_legacy_tmds_int_mode_set, + .commit = radeon_legacy_tmds_int_commit, +}; + + +static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { + .destroy = radeon_enc_destroy, +}; + +struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index) +{ + struct radeon_encoder *radeon_encoder; + struct drm_encoder *encoder; + + DRM_DEBUG("\n"); + + radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) { + return NULL; + } + + encoder = &radeon_encoder->base; + + encoder->possible_crtcs = 0x3; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); + + radeon_combios_get_tmds_info(radeon_encoder); + + return encoder; +} + +static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint32_t fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); + + DRM_DEBUG("\n"); + + switch(mode) { + case DRM_MODE_DPMS_ON: + fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN; + fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + fp2_gen_cntl |= RADEON_FP2_BLANK_EN; + fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); + break; + } 
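+	/* FP2 drives the external DVO port: FP2_ON/FP2_DVO_EN and
+	 * FP2_BLANK_EN are always toggled as complements in the switch
+	 * above */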
+ + RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); +} + +static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) +{ + radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) +{ + radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); + + DRM_DEBUG("\n"); + + if (radeon_crtc->crtc_id == 0) + radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); + + if (1) // FIXME rgbBits == 8 + fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */ + else + fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */ + + fp2_gen_cntl &= ~(RADEON_FP2_ON | + RADEON_FP2_DVO_EN | + RADEON_FP2_DVO_RATE_SEL_SDR); + + /* XXX: these are oem specific */ + if (radeon_is_r300(dev_priv)) { + if ((dev->pdev->device == 0x4850) && + (dev->pdev->subsystem_vendor == 0x1028) && + (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */ + fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE; + else + fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE; + + /*if (mode->clock > 165000) + fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ + } + + if (radeon_crtc->crtc_id == 0) { + if ((dev_priv->chip_family == CHIP_R200) || radeon_is_r300(dev_priv)) { + fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; + if (radeon_encoder->flags & RADEON_USE_RMX) + fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; + else + fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; + } else + fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2; + } else { + if ((dev_priv->chip_family == CHIP_R200) || radeon_is_r300(dev_priv)) { + fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; + fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; + } else + fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2; + } + + RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); +} + +static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { + .dpms = radeon_legacy_tmds_ext_dpms, + .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, + .prepare = radeon_legacy_tmds_ext_prepare, + .mode_set = radeon_legacy_tmds_ext_mode_set, + .commit = radeon_legacy_tmds_ext_commit, +}; + + +static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { + .destroy = radeon_enc_destroy, +}; + +struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index) +{ + struct radeon_encoder *radeon_encoder; + struct drm_encoder *encoder; + + DRM_DEBUG("\n"); + + radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) { + return NULL; + } + + encoder = &radeon_encoder->base; + + encoder->possible_crtcs = 0x3; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); + + //radeon_combios_get_tmds_info(radeon_encoder); + return encoder; +} + +static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void radeon_legacy_tv_dac_dpms(struct 
drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; + //uint32_t tv_master_cntl = 0; + + DRM_DEBUG("\n"); + + if (dev_priv->chip_family == CHIP_R200) + fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); + else { + crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); + // FIXME TV + //tv_master_cntl = RADEON_READ(RADEON_TV_MASTER_CNTL); + tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); + } + + switch(mode) { + case DRM_MODE_DPMS_ON: + if (dev_priv->chip_family == CHIP_R200) + fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); + else { + crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON; + //tv_master_cntl |= RADEON_TV_ON; + if (dev_priv->chip_family == CHIP_R420 || + dev_priv->chip_family == CHIP_RV410) + tv_dac_cntl &= ~(R420_TV_DAC_RDACPD | + R420_TV_DAC_GDACPD | + R420_TV_DAC_BDACPD | + RADEON_TV_DAC_BGSLEEP); + else + tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD | + RADEON_TV_DAC_BGSLEEP); + } + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + if (dev_priv->chip_family == CHIP_R200) + fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); + else { + crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; + //tv_master_cntl &= ~RADEON_TV_ON; + if (dev_priv->chip_family == CHIP_R420 || + dev_priv->chip_family == CHIP_RV410) + tv_dac_cntl |= (R420_TV_DAC_RDACPD | + R420_TV_DAC_GDACPD | + R420_TV_DAC_BDACPD | + RADEON_TV_DAC_BGSLEEP); + else + tv_dac_cntl |= (RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD | + RADEON_TV_DAC_BGSLEEP); + } + break; + } + + if (dev_priv->chip_family == CHIP_R200) + RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); + else { + RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); + //RADEON_WRITE(RADEON_TV_MASTER_CNTL, tv_master_cntl); + RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); + } + +} + +static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) +{ + radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) +{ + radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_radeon_private *dev_priv = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0; + uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0; + + DRM_DEBUG("\n"); + + if (radeon_crtc->crtc_id == 0) + radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); + + if (dev_priv->chip_family != CHIP_R200) { + tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); + if (dev_priv->chip_family == CHIP_R420 || + dev_priv->chip_family == CHIP_RV410) { + tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | + RADEON_TV_DAC_BGADJ_MASK | + R420_TV_DAC_DACADJ_MASK | + R420_TV_DAC_RDACPD | + R420_TV_DAC_GDACPD | + R420_TV_DAC_BDACPD | + R420_TV_DAC_TVENABLE); + } else { + tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | + RADEON_TV_DAC_BGADJ_MASK | + RADEON_TV_DAC_DACADJ_MASK | + RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD); + } + + // FIXME TV + tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | + RADEON_TV_DAC_NHOLD | + RADEON_TV_DAC_STD_PS2 | + 
radeon_encoder->ps2_tvdac_adj); + + RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); + } + + if (radeon_is_r300(dev_priv)) { + gpiopad_a = RADEON_READ(RADEON_GPIOPAD_A) | 1; + disp_output_cntl = RADEON_READ(RADEON_DISP_OUTPUT_CNTL); + } else if (dev_priv->chip_family == CHIP_R200) + fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); + else + disp_hw_debug = RADEON_READ(RADEON_DISP_HW_DEBUG); + + dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL; + + if (radeon_crtc->crtc_id == 0) { + if (radeon_is_r300(dev_priv)) { + disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; + disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC; + } else if (dev_priv->chip_family == CHIP_R200) { + fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | + RADEON_FP2_DVO_RATE_SEL_SDR); + } else + disp_hw_debug |= RADEON_CRT2_DISP1_SEL; + } else { + if (radeon_is_r300(dev_priv)) { + disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; + disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2; + } else if (dev_priv->chip_family == CHIP_R200) { + fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | + RADEON_FP2_DVO_RATE_SEL_SDR); + fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; + } else + disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; + } + + RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); + + if (radeon_is_r300(dev_priv)) { + RADEON_WRITE_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); + RADEON_WRITE(RADEON_DISP_TV_OUT_CNTL, disp_output_cntl); + } else if (dev_priv->chip_family == CHIP_R200) + RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); + else + RADEON_WRITE(RADEON_DISP_HW_DEBUG, disp_hw_debug); + +} + +static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + // FIXME + return connector_status_disconnected; + +} + +static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { + .dpms = radeon_legacy_tv_dac_dpms, + .mode_fixup = radeon_legacy_tv_dac_mode_fixup, + .prepare = radeon_legacy_tv_dac_prepare, + .mode_set = radeon_legacy_tv_dac_mode_set, + .commit = radeon_legacy_tv_dac_commit, + .detect = radeon_legacy_tv_dac_detect, +}; + + +static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = { + .destroy = radeon_enc_destroy, +}; + +struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int has_tv) +{ + struct radeon_encoder *radeon_encoder; + struct drm_encoder *encoder; + + DRM_DEBUG("\n"); + + radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) { + return NULL; + } + + encoder = &radeon_encoder->base; + + encoder->possible_crtcs = 0x3; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, + DRM_MODE_ENCODER_DAC); + + drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); + + /* get the tv dac vals from bios tables */ + radeon_combios_get_tv_info(radeon_encoder); + radeon_combios_get_tv_dac_info(radeon_encoder); + + return encoder; +} diff --git a/linux-core/radeon_mode.h b/linux-core/radeon_mode.h index f75e8272..3271375d 100644 --- a/linux-core/radeon_mode.h +++ b/linux-core/radeon_mode.h @@ -87,10 +87,23 @@ enum radeon_rmx_type { RMX_CENTER, }; +enum radeon_tv_std { + TV_STD_NTSC, + TV_STD_PAL, + TV_STD_PAL_M, + TV_STD_PAL_60, + TV_STD_NTSC_J, + TV_STD_SCART_PAL, + TV_STD_SECAM, + TV_STD_PAL_CN, +}; + struct radeon_i2c_bus_rec { bool valid; uint32_t mask_clk_reg; uint32_t mask_data_reg; + uint32_t a_clk_reg; + uint32_t a_data_reg; uint32_t put_clk_reg; uint32_t put_data_reg; uint32_t get_clk_reg; @@ -101,6 
+114,8 @@ struct radeon_i2c_bus_rec { uint32_t put_data_mask; uint32_t get_clk_mask; uint32_t get_data_mask; + uint32_t a_clk_mask; + uint32_t a_data_mask; }; struct radeon_bios_connector { @@ -115,8 +130,13 @@ struct radeon_bios_connector { int igp_lane_info; }; +struct radeon_tmds_pll { + uint32_t freq; + uint32_t value; +}; + #define RADEON_MAX_BIOS_CONNECTOR 16 - + #define RADEON_PLL_USE_BIOS_DIVS (1 << 0) #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) #define RADEON_PLL_USE_REF_DIV (1 << 2) @@ -124,27 +144,177 @@ struct radeon_bios_connector { #define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4) struct radeon_pll { - uint16_t reference_freq; - uint16_t reference_div; - uint32_t pll_in_min; - uint32_t pll_in_max; - uint32_t pll_out_min; - uint32_t pll_out_max; - uint16_t xclk; - - uint32_t min_ref_div; - uint32_t max_ref_div; - uint32_t min_post_div; - uint32_t max_post_div; - uint32_t min_feedback_div; - uint32_t max_feedback_div; - uint32_t best_vco; + uint16_t reference_freq; + uint16_t reference_div; + uint32_t pll_in_min; + uint32_t pll_in_max; + uint32_t pll_out_min; + uint32_t pll_out_max; + uint16_t xclk; + + uint32_t min_ref_div; + uint32_t max_ref_div; + uint32_t min_post_div; + uint32_t max_post_div; + uint32_t min_feedback_div; + uint32_t max_feedback_div; + uint32_t best_vco; +}; + +#define MAX_H_CODE_TIMING_LEN 32 +#define MAX_V_CODE_TIMING_LEN 32 + +struct radeon_legacy_state { + + uint32_t bus_cntl; + + /* DAC */ + uint32_t dac_cntl; + uint32_t dac2_cntl; + uint32_t dac_macro_cntl; + + /* CRTC 1 */ + uint32_t crtc_gen_cntl; + uint32_t crtc_ext_cntl; + uint32_t crtc_h_total_disp; + uint32_t crtc_h_sync_strt_wid; + uint32_t crtc_v_total_disp; + uint32_t crtc_v_sync_strt_wid; + uint32_t crtc_offset; + uint32_t crtc_offset_cntl; + uint32_t crtc_pitch; + uint32_t disp_merge_cntl; + uint32_t grph_buffer_cntl; + uint32_t crtc_more_cntl; + uint32_t crtc_tile_x0_y0; + + /* CRTC 2 */ + uint32_t crtc2_gen_cntl; + uint32_t crtc2_h_total_disp; + uint32_t crtc2_h_sync_strt_wid; + uint32_t crtc2_v_total_disp; + uint32_t crtc2_v_sync_strt_wid; + uint32_t crtc2_offset; + uint32_t crtc2_offset_cntl; + uint32_t crtc2_pitch; + uint32_t crtc2_tile_x0_y0; + + uint32_t disp_output_cntl; + uint32_t disp_tv_out_cntl; + uint32_t disp_hw_debug; + uint32_t disp2_merge_cntl; + uint32_t grph2_buffer_cntl; + + /* FP regs */ + uint32_t fp_crtc_h_total_disp; + uint32_t fp_crtc_v_total_disp; + uint32_t fp_gen_cntl; + uint32_t fp2_gen_cntl; + uint32_t fp_h_sync_strt_wid; + uint32_t fp_h2_sync_strt_wid; + uint32_t fp_horz_stretch; + uint32_t fp_horz_vert_active; + uint32_t fp_panel_cntl; + uint32_t fp_v_sync_strt_wid; + uint32_t fp_v2_sync_strt_wid; + uint32_t fp_vert_stretch; + uint32_t lvds_gen_cntl; + uint32_t lvds_pll_cntl; + uint32_t tmds_pll_cntl; + uint32_t tmds_transmitter_cntl; + + /* Computed values for PLL */ + uint32_t dot_clock_freq; + uint32_t pll_output_freq; + int feedback_div; + int reference_div; + int post_div; + + /* PLL registers */ + uint32_t ppll_ref_div; + uint32_t ppll_div_3; + uint32_t htotal_cntl; + uint32_t vclk_ecp_cntl; + + /* Computed values for PLL2 */ + uint32_t dot_clock_freq_2; + uint32_t pll_output_freq_2; + int feedback_div_2; + int reference_div_2; + int post_div_2; + + /* PLL2 registers */ + uint32_t p2pll_ref_div; + uint32_t p2pll_div_0; + uint32_t htotal_cntl2; + uint32_t pixclks_cntl; + + bool palette_valid; + uint32_t palette[256]; + uint32_t palette2[256]; + + uint32_t disp2_req_cntl1; + uint32_t disp2_req_cntl2; + uint32_t dmif_mem_cntl1; + uint32_t disp1_req_cntl1; + + 
uint32_t fp_2nd_gen_cntl; + uint32_t fp2_2_gen_cntl; + uint32_t tmds2_cntl; + uint32_t tmds2_transmitter_cntl; + + /* TV out registers */ + uint32_t tv_master_cntl; + uint32_t tv_htotal; + uint32_t tv_hsize; + uint32_t tv_hdisp; + uint32_t tv_hstart; + uint32_t tv_vtotal; + uint32_t tv_vdisp; + uint32_t tv_timing_cntl; + uint32_t tv_vscaler_cntl1; + uint32_t tv_vscaler_cntl2; + uint32_t tv_sync_size; + uint32_t tv_vrestart; + uint32_t tv_hrestart; + uint32_t tv_frestart; + uint32_t tv_ftotal; + uint32_t tv_clock_sel_cntl; + uint32_t tv_clkout_cntl; + uint32_t tv_data_delay_a; + uint32_t tv_data_delay_b; + uint32_t tv_dac_cntl; + uint32_t tv_pll_cntl; + uint32_t tv_pll_cntl1; + uint32_t tv_pll_fine_cntl; + uint32_t tv_modulator_cntl1; + uint32_t tv_modulator_cntl2; + uint32_t tv_frame_lock_cntl; + uint32_t tv_pre_dac_mux_cntl; + uint32_t tv_rgb_cntl; + uint32_t tv_y_saw_tooth_cntl; + uint32_t tv_y_rise_cntl; + uint32_t tv_y_fall_cntl; + uint32_t tv_uv_adr; + uint32_t tv_upsamp_and_gain_cntl; + uint32_t tv_gain_limit_settings; + uint32_t tv_linear_gain_settings; + uint32_t tv_crc_cntl; + uint32_t tv_sync_cntl; + uint32_t gpiopad_a; + uint32_t pll_test_cntl; + + uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN]; + uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN]; + + }; struct radeon_mode_info { struct atom_context *atom_context; struct radeon_bios_connector bios_connector[RADEON_MAX_BIOS_CONNECTOR]; struct radeon_pll pll; + struct radeon_legacy_state legacy_state; }; struct radeon_crtc { @@ -178,20 +348,40 @@ struct radeon_encoder { enum radeon_tmds_type tmds; } type; int atom_device; /* atom devices */ + + /* preferred mode */ uint32_t panel_xres, panel_yres; uint32_t hoverplus, hsync_width; uint32_t hblank; uint32_t voverplus, vsync_width; uint32_t vblank; - uint32_t panel_pwr_delay; uint32_t dotclock; + + /* legacy lvds */ + uint16_t panel_vcc_delay; + uint16_t panel_pwr_delay; + uint16_t panel_digon_delay; + uint16_t panel_blon_delay; + uint32_t panel_ref_divider; + uint32_t panel_post_divider; + uint32_t panel_fb_divider; + bool use_bios_dividers; + uint32_t lvds_gen_cntl; + + /* legacy tv dac */ + uint32_t ps2_tvdac_adj; + uint32_t ntsc_tvdac_adj; + uint32_t pal_tvdac_adj; + enum radeon_tv_std tv_std; + + /* legacy int tmds */ + struct radeon_tmds_pll tmds_pll[4]; }; struct radeon_connector { struct drm_connector base; struct radeon_i2c_chan *ddc_bus; int use_digital; - }; struct radeon_framebuffer { @@ -221,6 +411,10 @@ struct drm_encoder *radeon_encoder_lvtma_add(struct drm_device *dev, int bios_in struct drm_encoder *radeon_encoder_atom_dac_add(struct drm_device *dev, int bios_index, int dac_id, int with_tv); struct drm_encoder *radeon_encoder_atom_tmds_add(struct drm_device *dev, int bios_index, int tmds_type); struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); +struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); +struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); +struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); +struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); extern void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y); @@ -229,10 +423,22 @@ extern void atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, int x, int y); extern void 
atombios_crtc_dpms(struct drm_crtc *crtc, int mode); + +extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height); +extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y); + extern bool radeon_atom_get_clock_info(struct drm_device *dev); extern bool radeon_combios_get_clock_info(struct drm_device *dev); extern void radeon_get_lvds_info(struct radeon_encoder *encoder); extern bool radeon_combios_get_lvds_info(struct radeon_encoder *encoder); +extern bool radeon_combios_get_tmds_info(struct radeon_encoder *encoder); +extern bool radeon_combios_get_tv_info(struct radeon_encoder *encoder); +extern bool radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); struct drm_framebuffer *radeon_user_framebuffer_create(struct drm_device *dev, @@ -245,11 +451,18 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); void radeon_atombios_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc); -void avivo_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); +void radeon_legacy_init_crtc(struct drm_device *dev, + struct radeon_crtc *radeon_crtc); +void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); void radeon_atom_static_pwrmgt_setup(struct drm_device *dev, int enable); void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable); void radeon_get_clock_info(struct drm_device *dev); extern bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device *dev); +void radeon_rmx_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void radeon_enc_destroy(struct drm_encoder *encoder); + #endif diff --git a/linux-core/radeon_reg.h b/linux-core/radeon_reg.h index 04cfa732..b4f0ac08 100644 --- a/linux-core/radeon_reg.h +++ b/linux-core/radeon_reg.h @@ -374,6 +374,9 @@ # define RADEON_CRTC_ICON_EN (1 << 15) # define RADEON_CRTC_CUR_EN (1 << 16) # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) +# define RADEON_CRTC_CUR_MODE_SHIFT 20 +# define RADEON_CRTC_CUR_MODE_MONO 0 +# define RADEON_CRTC_CUR_MODE_24BPP 2 # define RADEON_CRTC_EXT_DISP_EN (1 << 24) # define RADEON_CRTC_EN (1 << 25) # define RADEON_CRTC_DISP_REQ_EN_B (1 << 26) @@ -556,6 +559,24 @@ # define RADEON_DAC_PDWN_R (1 << 16) # define RADEON_DAC_PDWN_G (1 << 17) # define RADEON_DAC_PDWN_B (1 << 18) +#define RADEON_DISP_PWR_MAN 0x0d08 +# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0) +# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4) +# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8) +# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8) +# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8) +# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8) +# define RADEON_DISP_D3_RST (1 << 16) +# define RADEON_DISP_D3_REG_RST (1 << 17) +# define RADEON_DISP_D3_GRPH_RST (1 << 18) +# define RADEON_DISP_D3_SUBPIC_RST (1 << 19) +# define RADEON_DISP_D3_OV0_RST (1 << 20) +# define RADEON_DISP_D1D2_GRPH_RST (1 << 21) +# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22) +# define RADEON_DISP_D1D2_OV0_RST (1 << 23) +# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24) +# define RADEON_TV_ENABLE_RST (1 << 25) +# define RADEON_AUTO_PWRUP_EN (1 << 26) #define RADEON_TV_DAC_CNTL 0x088c # define RADEON_TV_DAC_NBLANK (1 << 0) # define RADEON_TV_DAC_NHOLD (1 << 1) 
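The RADEON_DISP_PWR_MAN_DPMS_* values added above are pre-shifted encodings of a two-bit DPMS state field at bits 9:8 of RADEON_DISP_PWR_MAN. A minimal read-modify-write sketch of how such a field would be programmed, assuming a field mask of (3 << 8) (the header defines the four values but no explicit mask) and the RADEON_READ/RADEON_WRITE accessors used elsewhere in this patch; the helper name is hypothetical, not part of the patch:

	static void radeon_disp_pwr_man_set_dpms(drm_radeon_private_t *dev_priv, uint32_t dpms)
	{
		uint32_t tmp = RADEON_READ(RADEON_DISP_PWR_MAN);

		/* clear the old two-bit DPMS state (assumed field mask) */
		tmp &= ~(3 << 8);
		/* OR in one of the pre-shifted values, e.g. RADEON_DISP_PWR_MAN_DPMS_STANDBY */
		tmp |= dpms;
		RADEON_WRITE(RADEON_DISP_PWR_MAN, tmp);
	}
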
@@ -1006,14 +1027,23 @@ # define RADEON_LVDS_DISPLAY_DIS (1 << 1) # define RADEON_LVDS_PANEL_TYPE (1 << 2) # define RADEON_LVDS_PANEL_FORMAT (1 << 3) +# define RADEON_LVDS_NO_FM (0 << 4) +# define RADEON_LVDS_2_GREY (1 << 4) +# define RADEON_LVDS_4_GREY (2 << 4) # define RADEON_LVDS_RST_FM (1 << 6) # define RADEON_LVDS_EN (1 << 7) # define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8 # define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8) # define RADEON_LVDS_BL_MOD_EN (1 << 16) +# define RADEON_LVDS_BL_CLK_SEL (1 << 17) # define RADEON_LVDS_DIGON (1 << 18) # define RADEON_LVDS_BLON (1 << 19) +# define RADEON_LVDS_FP_POL_LOW (1 << 20) +# define RADEON_LVDS_LP_POL_LOW (1 << 21) +# define RADEON_LVDS_DTM_POL_LOW (1 << 22) # define RADEON_LVDS_SEL_CRTC2 (1 << 23) +# define RADEON_LVDS_FPDI_EN (1 << 27) +# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28 #define RADEON_LVDS_PLL_CNTL 0x02d4 # define RADEON_HSYNC_DELAY_SHIFT 28 # define RADEON_HSYNC_DELAY_MASK (0xf << 28) @@ -2095,16 +2125,19 @@ # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) # define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) -# define RADEON_COLOR_FORMAT_ARGB1555 (3 << 10) -# define RADEON_COLOR_FORMAT_RGB565 (4 << 10) -# define RADEON_COLOR_FORMAT_ARGB8888 (6 << 10) -# define RADEON_COLOR_FORMAT_RGB332 (7 << 10) -# define RADEON_COLOR_FORMAT_Y8 (8 << 10) -# define RADEON_COLOR_FORMAT_RGB8 (9 << 10) -# define RADEON_COLOR_FORMAT_YUV422_VYUY (11 << 10) -# define RADEON_COLOR_FORMAT_YUV422_YVYU (12 << 10) -# define RADEON_COLOR_FORMAT_aYUV444 (14 << 10) -# define RADEON_COLOR_FORMAT_ARGB4444 (15 << 10) +# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10 + +# define RADEON_COLOR_FORMAT_ARGB1555 3 +# define RADEON_COLOR_FORMAT_RGB565 4 +# define RADEON_COLOR_FORMAT_ARGB8888 6 +# define RADEON_COLOR_FORMAT_RGB332 7 +# define RADEON_COLOR_FORMAT_Y8 8 +# define RADEON_COLOR_FORMAT_RGB8 9 +# define RADEON_COLOR_FORMAT_YUV422_VYUY 11 +# define RADEON_COLOR_FORMAT_YUV422_YVYU 12 +# define RADEON_COLOR_FORMAT_aYUV444 14 +# define RADEON_COLOR_FORMAT_ARGB4444 15 + # define RADEON_CLRCMP_FLIP_ENABLE (1 << 14) #define RADEON_RB3D_COLOROFFSET 0x1c40 # define RADEON_COLOROFFSET_MASK 0xfffffff0 @@ -3086,6 +3119,10 @@ # define RADEON_CSQ_PRIPIO_INDBM (3 << 28) # define RADEON_CSQ_PRIBM_INDBM (4 << 28) # define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) + +#define R300_CP_RESYNC_ADDR 0x778 +#define R300_CP_RESYNC_DATA 0x77c + #define RADEON_CP_CSQ_STAT 0x07f8 # define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0) # define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8) @@ -3121,6 +3158,7 @@ # define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 # define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) # define RADEON_CP_PACKET0_REG_MASK 0x000007ff +# define R300_CP_PACKET0_REG_MASK 0x00001fff # define RADEON_CP_PACKET1_REG0_MASK 0x000007ff # define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 @@ -3565,14 +3603,14 @@ #define AVIVO_D1GRPH_X_END 0x6134 #define AVIVO_D1GRPH_Y_END 0x6138 #define AVIVO_D1GRPH_UPDATE 0x6144 -# define AVIVO_D1GRPH_UPDATE_LOCK (1<<16) +# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 #define AVIVO_D1CUR_CONTROL 0x6400 -# define AVIVO_D1CURSOR_EN (1<<0) -# define AVIVO_D1CURSOR_MODE_SHIFT 8 -# define AVIVO_D1CURSOR_MODE_MASK (0x3<<8) -# define AVIVO_D1CURSOR_MODE_24BPP (0x2) +# define AVIVO_D1CURSOR_EN (1 << 0) +# define AVIVO_D1CURSOR_MODE_SHIFT 8 +# define AVIVO_D1CURSOR_MODE_MASK (3 << 8) +# define AVIVO_D1CURSOR_MODE_24BPP 2 #define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408 #define AVIVO_D1CUR_SIZE 0x6410 #define AVIVO_D1CUR_POSITION 0x6414 @@ 
-4419,6 +4457,8 @@ # define R300_ENDIAN_SWAP_HALF_DWORD (3 << 0) # define R300_MACRO_TILE (1 << 2) +#define R300_TX_BORDER_COLOR_0 0x45c0 + #define R300_TX_ENABLE 0x4104 # define R300_TEX_0_ENABLE (1 << 0) # define R300_TEX_1_ENABLE (1 << 1) @@ -4705,7 +4745,24 @@ # define R300_READ_ENABLE (1 << 2) #define R300_RB3D_ABLENDCNTL 0x4e08 #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c - +#define R300_RB3D_COLOROFFSET0 0x4e28 +#define R300_RB3D_COLORPITCH0 0x4e38 +# define R300_COLORTILE (1 << 16) +# define R300_COLORENDIAN_WORD (1 << 19) +# define R300_COLORENDIAN_DWORD (2 << 19) +# define R300_COLORENDIAN_HALF_DWORD (3 << 19) +# define R300_COLORFORMAT_ARGB1555 (3 << 21) +# define R300_COLORFORMAT_RGB565 (4 << 21) +# define R300_COLORFORMAT_ARGB8888 (6 << 21) +# define R300_COLORFORMAT_ARGB32323232 (7 << 21) +# define R300_COLORFORMAT_I8 (9 << 21) +# define R300_COLORFORMAT_ARGB16161616 (10 << 21) +# define R300_COLORFORMAT_VYUY (11 << 21) +# define R300_COLORFORMAT_YVYU (12 << 21) +# define R300_COLORFORMAT_UV88 (13 << 21) +# define R300_COLORFORMAT_ARGB4444 (15 << 21) + +#define R300_RB3D_AARESOLVE_CTL 0x4e88 #define R300_RB3D_COLOR_CHANNEL_MASK 0x4e0c # define R300_BLUE_MASK_EN (1 << 0) # define R300_GREEN_MASK_EN (1 << 1) diff --git a/shared-core/drm.h b/shared-core/drm.h index dd03bf9b..b177dfe5 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -675,324 +675,6 @@ struct drm_set_version { int drm_dd_minor; }; - -#define DRM_FENCE_FLAG_EMIT 0x00000001 -#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 -/** - * On hardware with no interrupt events for operation completion, - * indicates that the kernel should sleep while waiting for any blocking - * operation to complete rather than spinning. - * - * Has no effect otherwise. - */ -#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 -#define DRM_FENCE_FLAG_NO_USER 0x00000010 - -/* Reserved for driver use */ -#define DRM_FENCE_MASK_DRIVER 0xFF000000 - -#define DRM_FENCE_TYPE_EXE 0x00000001 - -struct drm_fence_arg { - unsigned int handle; - unsigned int fence_class; - unsigned int type; - unsigned int flags; - unsigned int signaled; - unsigned int error; - unsigned int sequence; - unsigned int pad64; - uint64_t expand_pad[2]; /*Future expansion */ -}; - -/* Buffer permissions, referring to how the GPU uses the buffers. - * these translate to fence types used for the buffers. - * Typically a texture buffer is read, A destination buffer is write and - * a command (batch-) buffer is exe. Can be or-ed together. - */ - -#define DRM_BO_FLAG_READ (1ULL << 0) -#define DRM_BO_FLAG_WRITE (1ULL << 1) -#define DRM_BO_FLAG_EXE (1ULL << 2) - -/* - * All of the bits related to access mode - */ -#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE) -/* - * Status flags. Can be read to determine the actual state of a buffer. - * Can also be set in the buffer mask before validation. - */ - -/* - * Mask: Never evict this buffer. Not even with force. This type of buffer is only - * available to root and must be manually removed before buffer manager shutdown - * or lock. - * Flags: Acknowledge - */ -#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) - -/* - * Mask: Require that the buffer is placed in mappable memory when validated. - * If not set the buffer may or may not be in mappable memory when validated. - * Flags: If set, the buffer is in mappable memory. - */ -#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) - -/* Mask: The buffer should be shareable with other processes. - * Flags: The buffer is shareable with other processes. 
- */ -#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) - -/* Mask: If set, place the buffer in cache-coherent memory if available. - * If clear, never place the buffer in cache coherent memory if validated. - * Flags: The buffer is currently in cache-coherent memory. - */ -#define DRM_BO_FLAG_CACHED (1ULL << 7) - -/* Mask: Make sure that every time this buffer is validated, - * it ends up on the same location provided that the memory mask is the same. - * The buffer will also not be evicted when claiming space for - * other buffers. Basically a pinned buffer but it may be thrown out as - * part of buffer manager shutdown or locking. - * Flags: Acknowledge. - */ -#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) - -/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction - * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART - * with unsnooped PTEs instead of snooped, by using chipset-specific cache - * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED, - * as the eviction to local memory (TTM unbind) on map is just a side effect - * to prevent aggressive cache prefetch from the GPU disturbing the cache - * management that the DRM is doing. - * - * Flags: Acknowledge. - * Buffers allocated with this flag should not be used for suballocators - * This type may have issues on CPUs with over-aggressive caching - * http://marc.info/?l=linux-kernel&m=102376926732464&w=2 - */ -#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19) - - -/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. - * Flags: Acknowledge. - */ -#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) - -/* - * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. - * Flags: Acknowledge. - */ -#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) -#define DRM_BO_FLAG_TILE (1ULL << 15) - -/* - * Memory type flags that can be or'ed together in the mask, but only - * one appears in flags. - */ - -/* System memory */ -#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) -/* Translation table memory */ -#define DRM_BO_FLAG_MEM_TT (1ULL << 25) -/* Vram memory */ -#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) -/* Up to the driver to define. */ -#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) -#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) -#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) -#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) -#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) -/* We can add more of these now with a 64-bit flag type */ - -/* - * This is a mask covering all of the memory type flags; easier to just - * use a single constant than a bunch of | values. It covers - * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4 - */ -#define DRM_BO_MASK_MEM 0x00000000FF000000ULL -/* - * This adds all of the CPU-mapping options in with the memory - * type to label all bits which change how the page gets mapped - */ -#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \ - DRM_BO_FLAG_CACHED_MAPPED | \ - DRM_BO_FLAG_CACHED | \ - DRM_BO_FLAG_MAPPABLE) - -/* Driver-private flags */ -#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL - -/* - * Don't block on validate and map. Instead, return EBUSY. - */ -#define DRM_BO_HINT_DONT_BLOCK 0x00000002 -/* - * Don't place this buffer on the unfenced list. 
This means - * that the buffer will not end up having a fence associated - * with it as a result of this operation - */ -#define DRM_BO_HINT_DONT_FENCE 0x00000004 -/** - * On hardware with no interrupt events for operation completion, - * indicates that the kernel should sleep while waiting for any blocking - * operation to complete rather than spinning. - * - * Has no effect otherwise. - */ -#define DRM_BO_HINT_WAIT_LAZY 0x00000008 -/* - * The client has compute relocations refering to this buffer using the - * offset in the presumed_offset field. If that offset ends up matching - * where this buffer lands, the kernel is free to skip executing those - * relocations - */ -#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010 - -#define DRM_BO_INIT_MAGIC 0xfe769812 -#define DRM_BO_INIT_MAJOR 1 -#define DRM_BO_INIT_MINOR 0 -#define DRM_BO_INIT_PATCH 0 - - -struct drm_bo_info_req { - uint64_t mask; - uint64_t flags; - unsigned int handle; - unsigned int hint; - unsigned int fence_class; - unsigned int desired_tile_stride; - unsigned int tile_info; - unsigned int pad64; - uint64_t presumed_offset; -}; - -struct drm_bo_create_req { - uint64_t flags; - uint64_t size; - uint64_t buffer_start; - unsigned int hint; - unsigned int page_alignment; -}; - - -/* - * Reply flags - */ - -#define DRM_BO_REP_BUSY 0x00000001 - -struct drm_bo_info_rep { - uint64_t flags; - uint64_t proposed_flags; - uint64_t size; - uint64_t offset; - uint64_t arg_handle; - uint64_t buffer_start; - unsigned int handle; - unsigned int fence_flags; - unsigned int rep_flags; - unsigned int page_alignment; - unsigned int desired_tile_stride; - unsigned int hw_tile_stride; - unsigned int tile_info; - unsigned int pad64; - uint64_t expand_pad[4]; /*Future expansion */ -}; - -struct drm_bo_arg_rep { - struct drm_bo_info_rep bo_info; - int ret; - unsigned int pad64; -}; - -struct drm_bo_create_arg { - union { - struct drm_bo_create_req req; - struct drm_bo_info_rep rep; - } d; -}; - -struct drm_bo_handle_arg { - unsigned int handle; -}; - -struct drm_bo_reference_info_arg { - union { - struct drm_bo_handle_arg req; - struct drm_bo_info_rep rep; - } d; -}; - -struct drm_bo_map_wait_idle_arg { - union { - struct drm_bo_info_req req; - struct drm_bo_info_rep rep; - } d; -}; - -struct drm_bo_op_req { - enum { - drm_bo_validate, - drm_bo_fence, - drm_bo_ref_fence, - } op; - unsigned int arg_handle; - struct drm_bo_info_req bo_req; -}; - - -struct drm_bo_op_arg { - uint64_t next; - union { - struct drm_bo_op_req req; - struct drm_bo_arg_rep rep; - } d; - int handled; - unsigned int pad64; -}; - - -#define DRM_BO_MEM_LOCAL 0 -#define DRM_BO_MEM_TT 1 -#define DRM_BO_MEM_VRAM 2 -#define DRM_BO_MEM_PRIV0 3 -#define DRM_BO_MEM_PRIV1 4 -#define DRM_BO_MEM_PRIV2 5 -#define DRM_BO_MEM_PRIV3 6 -#define DRM_BO_MEM_PRIV4 7 - -#define DRM_BO_MEM_TYPES 8 /* For now. */ - -#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) -#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) - -struct drm_bo_version_arg { - uint32_t major; - uint32_t minor; - uint32_t patchlevel; -}; - -struct drm_mm_type_arg { - unsigned int mem_type; - unsigned int lock_flags; -}; - -struct drm_mm_init_arg { - unsigned int magic; - unsigned int major; - unsigned int minor; - unsigned int mem_type; - uint64_t p_offset; - uint64_t p_size; -}; - -struct drm_mm_info_arg { - unsigned int mem_type; - uint64_t p_size; -}; - struct drm_gem_close { /** Handle of the object to be closed. 
*/ uint32_t handle; @@ -1337,31 +1019,6 @@ struct drm_mode_crtc_lut { #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) -#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) -#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) -#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) -#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) - -#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) -#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) - -#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) -#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) -#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) -#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) -#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) -#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) -#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) -#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) -#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg) -#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg) - #define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) #define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) #define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA2, struct drm_mode_get_connector) @@ -1439,11 +1096,6 @@ typedef struct drm_agp_binding drm_agp_binding_t; typedef struct drm_agp_info drm_agp_info_t; typedef struct drm_scatter_gather drm_scatter_gather_t; typedef struct drm_set_version drm_set_version_t; - -typedef struct drm_fence_arg drm_fence_arg_t; -typedef struct drm_mm_type_arg drm_mm_type_arg_t; -typedef struct drm_mm_init_arg drm_mm_init_arg_t; -typedef enum drm_bo_type drm_bo_type_t; #endif #endif diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 1fdc5e17..ddc9cd5a 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -135,7 +135,7 @@ int i915_dma_cleanup(struct drm_device * dev) return 0; } -#if defined(I915_HAVE_BUFFER) && defined(DRI2) +#if defined(DRI2) #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16) #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff) #define DRI2_SAREA_BLOCK_NEXT(p) \ @@ -213,12 +213,7 @@ static int i915_initialize(struct drm_device * dev, } } -#ifdef I915_HAVE_BUFFER - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS; - } -#endif - + if (init->ring_size != 0) { dev_priv->ring.Size = init->ring_size; dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; @@ -253,10 +248,6 @@ static int i915_initialize(struct drm_device * dev, */ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; -#ifdef I915_HAVE_BUFFER - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - mutex_init(&dev_priv->cmdbuf_mutex); - } #ifdef DRI2 if (init->func == I915_INIT_DMA2) { int ret = setup_dri2_sarea(dev, file_priv, init); @@ -267,7 +258,6 @@ static int i915_initialize(struct 
drm_device * dev, } } #endif /* DRI2 */ -#endif /* I915_HAVE_BUFFER */ return 0; } @@ -533,9 +523,6 @@ int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) static int i915_dispatch_cmdbuffer(struct drm_device * dev, struct drm_i915_cmdbuffer * cmd) { -#ifdef I915_HAVE_FENCE - struct drm_i915_private *dev_priv = dev->dev_private; -#endif int nbox = cmd->num_cliprects; int i = 0, count, ret; @@ -562,10 +549,6 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, } i915_emit_breadcrumb(dev); -#ifdef I915_HAVE_FENCE - if (unlikely((dev_priv->counter & 0xFF) == 0)) - drm_fence_flush_old(dev, 0, dev_priv->counter); -#endif return 0; } @@ -616,10 +599,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev, } i915_emit_breadcrumb(dev); -#ifdef I915_HAVE_FENCE - if (unlikely((dev_priv->counter & 0xFF) == 0)) - drm_fence_flush_old(dev, 0, dev_priv->counter); -#endif return 0; } @@ -678,7 +657,6 @@ static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync) void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int i; @@ -692,10 +670,6 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) i915_do_dispatch_flip(dev, i, sync); i915_emit_breadcrumb(dev); -#ifdef I915_HAVE_FENCE - if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0))) - drm_fence_flush_old(dev, 0, dev_priv->counter); -#endif } int i915_quiescent(struct drm_device *dev) @@ -1049,9 +1023,6 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), -#ifdef I915_HAVE_BUFFER - DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH), -#endif DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 53087b57..ab13cd4a 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -380,58 +380,6 @@ typedef struct drm_i915_hws_addr { uint64_t addr; } drm_i915_hws_addr_t; -/* - * Relocation header is 4 uint32_ts - * 0 - 32 bit reloc count - * 1 - 32-bit relocation type - * 2-3 - 64-bit user buffer handle ptr for another list of relocs. - */ -#define I915_RELOC_HEADER 4 - -/* - * type 0 relocation has 4-uint32_t stride - * 0 - offset into buffer - * 1 - delta to add in - * 2 - buffer handle - * 3 - reserved (for optimisations later). - */ -/* - * type 1 relocation has 4-uint32_t stride. - * Hangs off the first item in the op list. - * Performed after all valiations are done. - * Try to group relocs into the same relocatee together for - * performance reasons. - * 0 - offset into buffer - * 1 - delta to add in - * 2 - buffer index in op list. - * 3 - relocatee index in op list. 
- */ -#define I915_RELOC_TYPE_0 0 -#define I915_RELOC0_STRIDE 4 -#define I915_RELOC_TYPE_1 1 -#define I915_RELOC1_STRIDE 4 - - -struct drm_i915_op_arg { - uint64_t next; - uint64_t reloc_ptr; - int handled; - unsigned int pad64; - union { - struct drm_bo_op_req req; - struct drm_bo_arg_rep rep; - } d; - -}; - -struct drm_i915_execbuffer { - uint64_t ops_list; - uint32_t num_buffers; - struct drm_i915_batchbuffer batch; - drm_context_t context; /* for lockless use in the future */ - struct drm_fence_arg fence_arg; -}; - struct drm_i915_gem_init { /** * Beginning offset in the GTT to be managed by the DRM memory diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c index b21351c0..7b88cc8d 100644 --- a/shared-core/i915_init.c +++ b/shared-core/i915_init.c @@ -437,7 +437,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) return; if (master_priv->sarea) - drm_rmmap(dev, master_priv->sarea); + drm_rmmap_locked(dev, master_priv->sarea); drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index 05a76ef1..725829b2 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -166,8 +166,6 @@ void r300_init_reg_flags(struct drm_device *dev) for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\ r300_reg_flags[i]|=(mark); -#define MARK_SAFE 1 -#define MARK_CHECK_OFFSET 2 #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE) @@ -247,6 +245,11 @@ void r300_init_reg_flags(struct drm_device *dev) ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8); ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); + ADD_RANGE(R500_SU_REG_DEST, 1); + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV410) { + ADD_RANGE(R300_DST_PIPE_CONFIG, 1); + } + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ADD_RANGE(R500_VAP_INDEX_OFFSET, 1); ADD_RANGE(R500_US_CONFIG, 2); @@ -257,6 +260,7 @@ void r300_init_reg_flags(struct drm_device *dev) ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2); ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2); ADD_RANGE(R500_ZB_FIFO_SIZE, 2); + ADD_RANGE(R500_GA_US_VECTOR_INDEX, 2); } else { ADD_RANGE(R300_PFS_CNTL_0, 3); ADD_RANGE(R300_PFS_NODE_0, 4); @@ -269,9 +273,46 @@ void r300_init_reg_flags(struct drm_device *dev) ADD_RANGE(R300_RS_ROUTE_0, 8); } + + /* add 2d blit engine registers for DDX */ + ADD_RANGE(RADEON_SRC_Y_X, 3); /* 1434, 1438, 143c, + SRC_Y_X, DST_Y_X, DST_HEIGHT_WIDTH + */ + ADD_RANGE(RADEON_DP_GUI_MASTER_CNTL, 1); /* 146c */ + ADD_RANGE(RADEON_DP_BRUSH_BKGD_CLR, 2); /* 1478, 147c */ + ADD_RANGE(RADEON_DP_SRC_FRGD_CLR, 2); /* 15d8, 15dc */ + ADD_RANGE(RADEON_DP_CNTL, 1); /* 16c0 */ + ADD_RANGE(RADEON_DP_WRITE_MASK, 1); /* 16cc */ + ADD_RANGE(RADEON_DEFAULT_SC_BOTTOM_RIGHT, 1); /* 16e8 */ + + ADD_RANGE(RADEON_DSTCACHE_CTLSTAT, 1); + ADD_RANGE(RADEON_WAIT_UNTIL, 1); + + ADD_RANGE_MARK(RADEON_DST_OFFSET, 1, MARK_CHECK_OFFSET); + ADD_RANGE_MARK(RADEON_SRC_OFFSET, 1, MARK_CHECK_OFFSET); + + ADD_RANGE_MARK(RADEON_DST_PITCH_OFFSET, 1, MARK_CHECK_OFFSET); + ADD_RANGE_MARK(RADEON_SRC_PITCH_OFFSET, 1, MARK_CHECK_OFFSET); + + /* TODO SCISSOR */ + ADD_RANGE_MARK(R300_SC_SCISSOR0, 2, MARK_CHECK_SCISSOR); + + ADD_RANGE(R300_SC_CLIP_0_A, 2); + ADD_RANGE(R300_SC_CLIP_RULE, 1); + ADD_RANGE(R300_SC_SCREENDOOR, 1); + + ADD_RANGE(R300_VAP_PVS_CODE_CNTL_0, 4); + ADD_RANGE(R300_VAP_PVS_VECTOR_INDX_REG, 2); + + if (dev_priv->chip_family <= CHIP_RV280) { + ADD_RANGE(RADEON_RE_TOP_LEFT, 1); + ADD_RANGE(RADEON_RE_WIDTH_HEIGHT, 1); + ADD_RANGE(RADEON_AUX_SC_CNTL, 1); + 
ADD_RANGE(RADEON_RB3D_DSTCACHE_CTLSTAT, 1); + } } -static __inline__ int r300_check_range(unsigned reg, int count) +int r300_check_range(unsigned reg, int count) { int i; if (reg & ~0xffff) @@ -282,6 +323,13 @@ static __inline__ int r300_check_range(unsigned reg, int count) return 0; } +int r300_get_reg_flags(unsigned reg) +{ + if (reg & ~0xffff) + return -1; + return r300_reg_flags[(reg >> 2)]; +} + static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * dev_priv, drm_radeon_kcmd_buffer_t diff --git a/shared-core/r300_reg.h b/shared-core/r300_reg.h index d35dd39d..9e9cb526 100644 --- a/shared-core/r300_reg.h +++ b/shared-core/r300_reg.h @@ -129,15 +129,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. /* END: Wild guesses */ #define R300_SE_VTE_CNTL 0x20b0 -# define R300_VPORT_X_SCALE_ENA 0x00000001 -# define R300_VPORT_X_OFFSET_ENA 0x00000002 -# define R300_VPORT_Y_SCALE_ENA 0x00000004 -# define R300_VPORT_Y_OFFSET_ENA 0x00000008 -# define R300_VPORT_Z_SCALE_ENA 0x00000010 -# define R300_VPORT_Z_OFFSET_ENA 0x00000020 -# define R300_VTX_XY_FMT 0x00000100 -# define R300_VTX_Z_FMT 0x00000200 -# define R300_VTX_W0_FMT 0x00000400 # define R300_VTX_W0_NORMALIZE 0x00000800 # define R300_VTX_ST_DENORMALIZED 0x00001000 @@ -493,7 +484,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */ # define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24 -#define R300_GB_SELECT 0x401C + # define R300_GB_FOG_SELECT_C0A 0 # define R300_GB_FOG_SELECT_C1A 1 # define R300_GB_FOG_SELECT_C2A 2 @@ -955,7 +946,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. /* 32 bit chroma key */ #define R300_TX_CHROMA_KEY_0 0x4580 /* ff00ff00 == { 0, 1.0, 0, 1.0 } */ -#define R300_TX_BORDER_COLOR_0 0x45C0 /* END: Texture specification */ @@ -1340,7 +1330,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. /* gap */ -#define R300_RB3D_COLOROFFSET0 0x4E28 # define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ #define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ #define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ @@ -1352,7 +1341,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. * Bit 17: 4x2 tiles * Bit 18: Extremely weird tile like, but some pixels duplicated? 
*/ -#define R300_RB3D_COLORPITCH0 0x4E38 # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index 6fbcfeab..e30696fc 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -107,7 +107,33 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) return RADEON_READ(RADEON_MC_FB_LOCATION); } -static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) +void radeon_read_agp_location(drm_radeon_private_t *dev_priv, u32 *agp_lo, u32 *agp_hi) +{ + if (dev_priv->chip_family == CHIP_RV770) { + + } else if (dev_priv->chip_family == CHIP_R600) { + *agp_lo = RADEON_READ(R600_MC_VM_AGP_BOT); + *agp_hi = RADEON_READ(R600_MC_VM_AGP_TOP); + } else if (dev_priv->chip_family == CHIP_RV515) { + *agp_lo = radeon_read_mc_reg(dev_priv, RV515_MC_FB_LOCATION); + *agp_hi = 0; + } else if (dev_priv->chip_family == CHIP_RS600) { + *agp_lo = 0; + *agp_hi = 0; + } else if (dev_priv->chip_family == CHIP_RS690 || + dev_priv->chip_family == CHIP_RS740) { + *agp_lo = radeon_read_mc_reg(dev_priv, RS690_MC_AGP_LOCATION); + *agp_hi = 0; + } else if (dev_priv->chip_family >= CHIP_R520) { + *agp_lo = radeon_read_mc_reg(dev_priv, R520_MC_AGP_LOCATION); + *agp_hi = 0; + } else { + *agp_lo = RADEON_READ(RADEON_MC_FB_LOCATION); + *agp_hi = 0; + } +} + +void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) { if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); @@ -119,7 +145,7 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); } -static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) +static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc, u32 agp_loc_hi) { if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); @@ -190,7 +216,7 @@ void radeon_pll_errata_after_data(struct drm_radeon_private *dev_priv) } } -int RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr) +u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr) { uint32_t data; @@ -661,8 +687,8 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, ((dev_priv->gart_vm_start - 1) & 0xffff0000) | (dev_priv->fb_location >> 16)); - if (dev_priv->mm.ring) { - ring_start = dev_priv->mm.ring->offset + + if (dev_priv->mm.ring.bo) { + ring_start = dev_priv->mm.ring.bo->offset + dev_priv->gart_vm_start; } else #if __OS_HAS_AGP @@ -672,7 +698,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, radeon_write_agp_location(dev_priv, (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | - (dev_priv->gart_vm_start >> 16))); + (dev_priv->gart_vm_start >> 16)), 0); ring_start = (dev_priv->cp_ring->offset - dev->agp->base @@ -695,9 +721,9 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, dev_priv->ring.tail = cur_read_ptr; - if (dev_priv->mm.ring_read_ptr) { + if (dev_priv->mm.ring_read.bo) { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, - dev_priv->mm.ring_read_ptr->offset + + dev_priv->mm.ring_read.bo->offset + dev_priv->gart_vm_start); } else #if __OS_HAS_AGP @@ -748,9 +774,9 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) + RADEON_SCRATCH_REG_OFFSET); - if 
(dev_priv->mm.ring_read_ptr) + if (dev_priv->mm.ring_read.bo) dev_priv->scratch = ((__volatile__ u32 *) - dev_priv->mm.ring_read_ptr_map.virtual + + dev_priv->mm.ring_read.kmap.virtual + (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); else dev_priv->scratch = ((__volatile__ u32 *) @@ -775,12 +801,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, radeon_do_wait_for_idle(dev_priv); /* Sync everything up */ + if (dev_priv->chip_family > CHIP_RV280) { RADEON_WRITE(RADEON_ISYNC_CNTL, (RADEON_ISYNC_ANY2D_IDLE3D | RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI)); - + } else { + RADEON_WRITE(RADEON_ISYNC_CNTL, + (RADEON_ISYNC_ANY2D_IDLE3D | + RADEON_ISYNC_ANY3D_IDLE2D | + RADEON_ISYNC_WAIT_IDLEGUI)); + } } static void radeon_test_writeback(drm_radeon_private_t * dev_priv) @@ -788,8 +820,8 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv) u32 tmp; void *ring_read_ptr; - if (dev_priv->mm.ring_read_ptr) - ring_read_ptr = dev_priv->mm.ring_read_ptr_map.virtual; + if (dev_priv->mm.ring_read.bo) + ring_read_ptr = dev_priv->mm.ring_read.kmap.virtual; else ring_read_ptr = dev_priv->ring_rptr->handle; @@ -867,7 +899,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 0xffff0000) | (dev_priv->gart_vm_start >> 16)); - radeon_write_agp_location(dev_priv, temp); + radeon_write_agp_location(dev_priv, temp, 0); temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | @@ -915,7 +947,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) dev_priv->gart_vm_start + dev_priv->gart_size - 1); - radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ + radeon_write_agp_location(dev_priv, 0xffffffc0, 0); /* ?? */ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, RADEON_PCIE_TX_GART_EN); @@ -959,7 +991,7 @@ void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) /* Turn off AGP aperture -- is this required for PCI GART? 
*/ - radeon_write_agp_location(dev_priv, 0xffffffc0); + radeon_write_agp_location(dev_priv, 0xffffffc0, 0); RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ } else { RADEON_WRITE(RADEON_AIC_CNTL, @@ -1353,8 +1385,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) if (dev_priv->gart_info.bus_addr) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); - if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) - DRM_ERROR("failed to cleanup PCI GART!\n"); + drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info); } if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) @@ -1362,6 +1393,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) if (dev_priv->pcigart_offset_set == 1) { drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = NULL; + dev_priv->pcigart_offset_set = 0; } } } @@ -1553,8 +1585,10 @@ void radeon_do_release(struct drm_device * dev) radeon_mem_takedown(&(dev_priv->gart_heap)); radeon_mem_takedown(&(dev_priv->fb_heap)); - - radeon_gem_mm_fini(dev); + if (dev_priv->user_mm_enable) { + radeon_gem_mm_fini(dev); + dev_priv->user_mm_enable = false; + } /* deallocate kernel resources */ radeon_do_cleanup_cp(dev); @@ -2270,6 +2304,7 @@ static void radeon_set_dynamic_clock(struct drm_device *dev, int mode) int radeon_modeset_cp_init(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; + uint32_t tmp; /* allocate a ring and ring rptr bits from GART space */ /* these are allocated in GEM files */ @@ -2278,8 +2313,8 @@ int radeon_modeset_cp_init(struct drm_device *dev) dev_priv->ring.size = RADEON_DEFAULT_RING_SIZE; dev_priv->cp_mode = RADEON_CSQ_PRIBM_INDBM; - dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual; - dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual + + dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual; + dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual + dev_priv->ring.size / sizeof(u32); dev_priv->ring.size_l2qw = drm_order(dev_priv->ring.size / 8); dev_priv->ring.rptr_update = 4096; @@ -2289,14 +2324,21 @@ int radeon_modeset_cp_init(struct drm_device *dev) dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; - dev_priv->new_memmap = 1; + dev_priv->new_memmap = true; + r300_init_reg_flags(dev); + radeon_cp_load_microcode(dev_priv); - DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring->offset, dev_priv->mm.ring_read_ptr->offset); + DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring.bo->offset, dev_priv->mm.ring_read.bo->offset); radeon_cp_init_ring_buffer(dev, dev_priv); + /* need to enable BUS mastering in Buscntl */ + tmp = RADEON_READ(RADEON_BUS_CNTL); + tmp &= ~RADEON_BUS_MASTER_DIS; + RADEON_WRITE(RADEON_BUS_CNTL, tmp); + radeon_do_engine_reset(dev); radeon_test_writeback(dev_priv); @@ -2367,8 +2409,8 @@ int radeon_modeset_preinit(struct drm_device *dev) if (dev_priv->is_atom_bios) { dev_priv->mode_info.atom_context = atom_parse(&card, dev_priv->bios); - radeon_get_clock_info(dev); } + radeon_get_clock_info(dev); return 0; } @@ -2466,10 +2508,6 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) if (drm_core_check_feature(dev, DRIVER_MODESET)) { - dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; - dev_priv->fb_size = - ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) - - dev_priv->fb_location; radeon_gem_mm_init(dev); 
radeon_modeset_init(dev); @@ -2523,7 +2561,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) master_priv->sarea_priv = NULL; if (master_priv->sarea) - drm_rmmap(dev, master_priv->sarea); + drm_rmmap_locked(dev, master_priv->sarea); drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); diff --git a/shared-core/radeon_cs.c b/shared-core/radeon_cs.c new file mode 100644 index 00000000..b0c4abe8 --- /dev/null +++ b/shared-core/radeon_cs.c @@ -0,0 +1,411 @@ +/* + * Copyright 2008 Jerome Glisse. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Jerome Glisse <glisse@freedesktop.org> + */ +#include "drmP.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "r300_reg.h" + +int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + struct drm_radeon_cs *cs = data; + uint32_t *packets = NULL; + uint32_t cs_id; + uint32_t card_offset; + void *ib = NULL; + long size; + int r; + RING_LOCALS; + + /* set command stream id to 0 which is fake id */ + cs_id = 0; + DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t)); + + if (dev_priv == NULL) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + if (!cs->dwords) { + return 0; + } + /* limit cs to 64K ib */ + if (cs->dwords > (16 * 1024)) { + return -EINVAL; + } + /* copy cs from userspace maybe we should copy into ib to save + * one copy but ib will be mapped wc so not good for cmd checking + * somethings worth testing i guess (Jerome) + */ + size = cs->dwords * sizeof(uint32_t); + packets = drm_alloc(size, DRM_MEM_DRIVER); + if (packets == NULL) { + return -ENOMEM; + } + if (DRM_COPY_FROM_USER(packets, (void __user *)(unsigned long)cs->packets, size)) { + r = -EFAULT; + goto out; + } + /* get ib */ + r = dev_priv->cs.ib_get(dev, &ib, cs->dwords, &card_offset); + if (r) { + goto out; + } + + /* now parse command stream */ + r = dev_priv->cs.parse(dev, fpriv, ib, packets, cs->dwords); + if (r) { + goto out; + } + + BEGIN_RING(4); + OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1)); + OUT_RING(card_offset); + OUT_RING(cs->dwords); + OUT_RING(CP_PACKET2()); + ADVANCE_RING(); + + /* emit cs id sequence */ + dev_priv->cs.id_emit(dev, &cs_id); + COMMIT_RING(); + + DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t)); +out: + dev_priv->cs.ib_free(dev, ib, cs->dwords); + drm_free(packets, size, 
DRM_MEM_DRIVER);
+	return r;
+}
+
+/* for non-mm */
+static int radeon_nomm_relocate(struct drm_device *dev, struct drm_file *file_priv, uint32_t *reloc, uint32_t *offset)
+{
+	*offset = reloc[1];
+	return 0;
+}
+#define RELOC_SIZE 2
+#define RADEON_2D_OFFSET_MASK 0x3fffff
+
+static __inline__ int radeon_cs_relocate_packet0(struct drm_device *dev, struct drm_file *file_priv,
+						 uint32_t *packets, uint32_t offset_dw)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t hdr = packets[offset_dw];
+	uint32_t reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2;
+	uint32_t val = packets[offset_dw + 1];
+	uint32_t packet3_hdr = packets[offset_dw+2];
+	uint32_t tmp, offset;
+	int ret;
+
+	/* this is too strict; we may want to expand the length in the future and
+	   have old kernels ignore it. */
+	if (packet3_hdr != (RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16))) {
+		DRM_ERROR("Packet 3 was %x should have been %x\n", packet3_hdr, RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16));
+		return -EINVAL;
+	}
+
+	switch(reg) {
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		/* pass in the start of the reloc */
+		ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + 2, &offset);
+		if (ret)
+			return ret;
+		tmp = (val & RADEON_2D_OFFSET_MASK) << 10;
+		val &= ~RADEON_2D_OFFSET_MASK;
+		offset += tmp;
+		offset >>= 10;
+		val |= offset;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_ZB_DEPTHOFFSET:
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+		ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + 2, &offset);
+		if (ret)
+			return ret;
+
+		offset &= 0xffffffe0;
+		val += offset;
+		break;
+	default:
+		break;
+	}
+
+	packets[offset_dw + 1] = val;
+	return 0;
+}
+
+static int radeon_cs_relocate_packet3(struct drm_device *dev, struct drm_file *file_priv,
+				      uint32_t *packets, uint32_t offset_dw)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t hdr = packets[offset_dw];
+	int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+	uint32_t reg = hdr & 0xff00;
+	uint32_t offset, val, tmp;
+	int ret;
+
+	switch(reg) {
+	case RADEON_CNTL_HOSTDATA_BLT:
+	{
+		val = packets[offset_dw + 2];
+		ret = dev_priv->cs.relocate(dev, file_priv, packets + offset_dw + num_dw + 2, &offset);
+		if (ret)
+			return ret;
+
+		tmp = (val & RADEON_2D_OFFSET_MASK) << 10;
+		val &= ~RADEON_2D_OFFSET_MASK;
+		offset += tmp;
+		offset >>= 10;
+		val |= offset;
+
+		packets[offset_dw + 2] = val;
+	}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __inline__ int radeon_cs_check_offset(struct drm_device *dev,
+					     uint32_t reg, uint32_t val)
+{
+	uint32_t offset;
+
+	switch(reg) {
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		offset = val & ((1 << 22) - 1);
+		offset <<= 10;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_ZB_DEPTHOFFSET:
+		offset = val;
+		break;
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+		offset = val & 0xffffffe0;
+		break;
+	}
+
+	return 0;
+}
+
+int radeon_cs_packet0(struct drm_device *dev, struct drm_file *file_priv,
+		      uint32_t *packets, uint32_t offset_dw)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t hdr = packets[offset_dw];
+	int num_dw = ((hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16) + 2;
+	int need_reloc = 0;
+	int reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2;
+	int count_dw = 1;
+	int ret;
+
+	while (count_dw < num_dw) {
+		/* need to have something like the r300 validation here -
+		   list of allowed registers */
+		int flags;
+
+		ret = r300_check_range(reg, 1);
+		switch(ret) {
+		case -1:
+			DRM_ERROR("Illegal register %x\n", reg);
+			break;
+		case 0:
+			break;
+		case 1:
+			flags = r300_get_reg_flags(reg);
+			if (flags == MARK_CHECK_OFFSET) {
+				if (num_dw > 2) {
+					DRM_ERROR("Cannot relocate inside type stream of reg0 packets\n");
+					return -EINVAL;
+				}
+
+				ret = radeon_cs_relocate_packet0(dev, file_priv, packets, offset_dw);
+				if (ret)
+					return ret;
+				DRM_DEBUG("need to relocate %x %d\n", reg, flags);
+				/* okay it should be followed by a NOP */
+			} else if (flags == MARK_CHECK_SCISSOR) {
+				DRM_DEBUG("need to validate scissor %x %d\n", reg, flags);
+			} else {
+				DRM_DEBUG("illegal register %x %d\n", reg, flags);
+				return -EINVAL;
+			}
+			break;
+		}
+		count_dw++;
+		reg += 4;
+	}
+	return 0;
+}
+
+int radeon_cs_parse(struct drm_device *dev, struct drm_file *file_priv,
+		    void *ib, uint32_t *packets, uint32_t dwords)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	volatile int rb;
+	int size_dw = dwords;
+	/* scan the packet for various things */
+	int count_dw = 0;
+	int ret = 0;
+
+	while (count_dw < size_dw && ret == 0) {
+		int hdr = packets[count_dw];
+		int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+		int reg;
+
+		switch (hdr & RADEON_CP_PACKET_MASK) {
+		case RADEON_CP_PACKET0:
+			ret = radeon_cs_packet0(dev, file_priv, packets, count_dw);
+			break;
+		case RADEON_CP_PACKET1:
+		case RADEON_CP_PACKET2:
+			reg = hdr & RADEON_CP_PACKET0_REG_MASK;
+			DRM_DEBUG("Packet 1/2: %d %x\n", num_dw, reg);
+			break;
+
+		case RADEON_CP_PACKET3:
+			reg = hdr & 0xff00;
+
+			switch(reg) {
+			case RADEON_CNTL_HOSTDATA_BLT:
+				radeon_cs_relocate_packet3(dev, file_priv, packets, count_dw);
+				break;
+
+			case RADEON_CNTL_BITBLT_MULTI:
+			case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
+			case RADEON_CP_INDX_BUFFER:
+				DRM_ERROR("need relocate packet 3 for %x\n", reg);
+				break;
+
+			case RADEON_CP_3D_DRAW_IMMD_2:	/* triggers drawing using in-packet vertex data */
+			case RADEON_CP_3D_DRAW_VBUF_2:	/* triggers drawing of vertex buffers setup elsewhere */
+			case RADEON_CP_3D_DRAW_INDX_2:	/* triggers drawing using indices to vertex buffer */
+			case RADEON_WAIT_FOR_IDLE:
+			case RADEON_CP_NOP:
+				break;
+			default:
+				DRM_ERROR("unknown packet 3 %x\n", reg);
+				ret = -EINVAL;
+			}
+			break;
+		}
+
+		count_dw += num_dw+2;
+	}
+
+	if (ret)
+		return ret;
+
+
+	/* copy the packet into the IB */
+	memcpy(ib, packets, dwords * sizeof(uint32_t));
+
+	/* read back last byte to flush WC buffers */
+	rb = readl((ib + (dwords-1) * sizeof(uint32_t)));
+
+	return 0;
+}
+
+uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon)
+{
+	/* FIXME: protect with a spinlock */
+	/* FIXME: check if wrap affect last reported wrap & sequence */
+	radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF;
+	if (!radeon->cs.id_scnt) {
+		/* increment wrap counter */
+		radeon->cs.id_wcnt += 0x01000000;
+		/* valid sequence counter start at 1 */
+		radeon->cs.id_scnt = 1;
+	}
+	return (radeon->cs.id_scnt | radeon->cs.id_wcnt);
+}
+
+void r100_cs_id_emit(struct drm_device *dev, uint32_t *id)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	/* ISYNC_CNTL should have CPSCRATCH bit set */
+	*id = radeon_cs_id_get(dev_priv);
+	/* emit id in SCRATCH4 (not used yet in old drm) */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG4, 0));
+	OUT_RING(*id);
+	ADVANCE_RING();
+}
+
+void r300_cs_id_emit(struct drm_device *dev, uint32_t *id)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	/* ISYNC_CNTL should not have CPSCRATCH bit set */
+	*id = radeon_cs_id_get(dev_priv);
+	/* emit 
+
+uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon)
+{
+    /* FIXME: protect with a spinlock */
+    /* FIXME: check if wrap affects the last reported wrap & sequence */
+    radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF;
+    if (!radeon->cs.id_scnt) {
+        /* increment wrap counter */
+        radeon->cs.id_wcnt += 0x01000000;
+        /* valid sequence counter starts at 1 */
+        radeon->cs.id_scnt = 1;
+    }
+    return (radeon->cs.id_scnt | radeon->cs.id_wcnt);
+}
+
+void r100_cs_id_emit(struct drm_device *dev, uint32_t *id)
+{
+    drm_radeon_private_t *dev_priv = dev->dev_private;
+    RING_LOCALS;
+
+    /* ISYNC_CNTL should have CPSCRATCH bit set */
+    *id = radeon_cs_id_get(dev_priv);
+    /* emit id in SCRATCH4 (not used yet in old drm) */
+    BEGIN_RING(2);
+    OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG4, 0));
+    OUT_RING(*id);
+    ADVANCE_RING();
+}
+
+void r300_cs_id_emit(struct drm_device *dev, uint32_t *id)
+{
+    drm_radeon_private_t *dev_priv = dev->dev_private;
+    RING_LOCALS;
+
+    /* ISYNC_CNTL should not have CPSCRATCH bit set */
+    *id = radeon_cs_id_get(dev_priv);
+    /* emit id in SCRATCH6 */
+    BEGIN_RING(6);
+    OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 0));
+    OUT_RING(6);
+    OUT_RING(CP_PACKET0(R300_CP_RESYNC_DATA, 0));
+    OUT_RING(*id);
+    OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+    OUT_RING(R300_RB3D_DC_FINISH);
+    ADVANCE_RING();
+}
+
+uint32_t r100_cs_id_last_get(struct drm_device *dev)
+{
+    drm_radeon_private_t *dev_priv = dev->dev_private;
+
+    return RADEON_READ(RADEON_SCRATCH_REG4);
+}
+
+uint32_t r300_cs_id_last_get(struct drm_device *dev)
+{
+    drm_radeon_private_t *dev_priv = dev->dev_private;
+
+    return RADEON_READ(RADEON_SCRATCH_REG6);
+}
+
+int radeon_cs_init(struct drm_device *dev)
+{
+    drm_radeon_private_t *dev_priv = dev->dev_private;
+
+    if (dev_priv->chip_family < CHIP_RV280) {
+        dev_priv->cs.id_emit = r100_cs_id_emit;
+        dev_priv->cs.id_last_get = r100_cs_id_last_get;
+    } else if (dev_priv->chip_family < CHIP_R600) {
+        dev_priv->cs.id_emit = r300_cs_id_emit;
+        dev_priv->cs.id_last_get = r300_cs_id_last_get;
+    }
+
+    dev_priv->cs.parse = radeon_cs_parse;
+    /* which ib_get is used depends on whether the memory manager is up */
+    dev_priv->cs.relocate = radeon_nomm_relocate;
+    return 0;
+}
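The submission id emitted above packs a 24-bit sequence counter and an 8-bit wrap counter into one dword, so ids read back from the scratch register stay comparable across sequence wrap. A userspace-style illustration of the same packing (next_cs_id is a hypothetical helper mirroring radeon_cs_id_get()):

    #include <stdint.h>

    static uint32_t next_cs_id(uint32_t *scnt, uint32_t *wcnt)
    {
        *scnt = (*scnt + 1) & 0x00FFFFFF;  /* 24-bit sequence counter */
        if (*scnt == 0) {
            *wcnt += 0x01000000;           /* 8-bit wrap count in the top byte */
            *scnt = 1;                     /* 0 is reserved as "invalid" */
        }
        return *scnt | *wcnt;
    }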
diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h
index 7fcf9305..8bb5d87c 100644
--- a/shared-core/radeon_drm.h
+++ b/shared-core/radeon_drm.h
@@ -455,6 +455,13 @@ typedef struct {
     int tiling_enabled;     /* set by drm, read by 2d + 3d clients */
 
     unsigned int last_fence;
+
+    uint32_t front_handle;
+    uint32_t back_handle;
+    uint32_t depth_handle;
+    uint32_t front_pitch;
+    uint32_t back_pitch;
+    uint32_t depth_pitch;
 } drm_radeon_sarea_t;
 
@@ -506,6 +513,7 @@ typedef struct {
 #define DRM_RADEON_GEM_SET_DOMAIN  0x23
 #define DRM_RADEON_GEM_INDIRECT    0x24 // temporary for X server
+#define DRM_RADEON_CS              0x25
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -545,6 +553,7 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_SET_DOMAIN  DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
 #define DRM_IOCTL_RADEON_GEM_INDIRECT    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INDIRECT, struct drm_radeon_gem_indirect)
+#define DRM_IOCTL_RADEON_CS              DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
 
 typedef struct drm_radeon_init {
@@ -703,6 +712,7 @@ typedef struct drm_radeon_indirect {
 #define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
 #define RADEON_PARAM_FB_LOCATION           14   /* FB location */
 #define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
+#define RADEON_PARAM_KERNEL_MM             16
 
 typedef struct drm_radeon_getparam {
     int param;
@@ -758,6 +768,7 @@ typedef struct drm_radeon_setparam {
 #define RADEON_SETPARAM_NEW_MEMMAP         4    /* Use new memory map */
 #define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5    /* PCI GART Table Size */
 #define RADEON_SETPARAM_VBLANK_CRTC        6    /* VBLANK CRTC */
+#define RADEON_SETPARAM_MM_INIT            7    /* Initialise the mm */
 
 /* 1.14: Clients can allocate/free a surface */
 typedef struct drm_radeon_surface_alloc {
@@ -861,4 +872,17 @@ struct drm_radeon_gem_indirect {
     uint32_t used;
 };
 
+/* New interface which obsoletes all previous interfaces.
+ */
+
+struct drm_radeon_cs {
+    uint32_t dwords;    /* number of dwords in the packet stream */
+    uint32_t cs_id;     /* returned submission id */
+    uint64_t packets;   /* user pointer to the packet stream */
+};
+
 #endif
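With DRM_IOCTL_RADEON_CS and struct drm_radeon_cs in place, a client hands the kernel a user pointer to its dword stream and gets a submission id back. A minimal sketch of a userspace submission, assuming libdrm's drmCommandWriteRead() and this header (submit_cs is hypothetical, not part of the patch):

    #include <stdint.h>
    #include <string.h>
    #include "xf86drm.h"
    #include "radeon_drm.h"

    static int submit_cs(int fd, uint32_t *packets, uint32_t ndw, uint32_t *cs_id)
    {
        struct drm_radeon_cs cs;
        int ret;

        memset(&cs, 0, sizeof(cs));
        cs.dwords = ndw;
        cs.packets = (uint64_t)(uintptr_t)packets; /* user pointer carried as u64 */

        ret = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
        if (ret == 0)
            *cs_id = cs.cs_id; /* id emitted to the scratch register */
        return ret;
    }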
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 5ce97b64..9edd3884 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -195,11 +195,11 @@ enum radeon_mac_model {
 
 #define GET_RING_HEAD(dev_priv)     (dev_priv->writeback_works ? \
-    (dev_priv->mm.ring_read_ptr ? readl(dev_priv->mm.ring_read_ptr_map.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0)) : \
+    (dev_priv->mm.ring_read.bo ? readl(dev_priv->mm.ring_read.kmap.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0)) : \
     RADEON_READ(RADEON_CP_RB_RPTR))
-#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read_ptr ? \
-    writel((val), dev_priv->mm.ring_read_ptr_map.virtual) : \
+#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read.bo ? \
+    writel((val), dev_priv->mm.ring_read.kmap.virtual) : \
     DRM_WRITE32((dev_priv)->ring_rptr, 0, (val)))
 
 typedef struct drm_radeon_freelist {
@@ -261,6 +261,11 @@ struct radeon_virt_surface {
     struct drm_file *file_priv;
 };
 
+struct radeon_mm_obj {
+    struct drm_buffer_object *bo;
+    struct drm_bo_kmap_obj kmap;
+};
+
 struct radeon_mm_info {
     uint64_t vram_offset; // Offset into GPU space
     uint64_t vram_size;
@@ -268,15 +273,13 @@ struct radeon_mm_info {
     uint64_t gart_start;
     uint64_t gart_size;
+
+    struct radeon_mm_obj pcie_table;
+    struct radeon_mm_obj ring;
+    struct radeon_mm_obj ring_read;
 
-    struct drm_buffer_object *pcie_table;
-    struct drm_bo_kmap_obj pcie_table_map;
-
-    struct drm_buffer_object *ring;
-    struct drm_bo_kmap_obj ring_map;
-
-    struct drm_buffer_object *ring_read_ptr;
-    struct drm_bo_kmap_obj ring_read_ptr_map;
+    struct radeon_mm_obj dma_bufs;
+    struct drm_map fake_agp_map;
 };
 
 #include "radeon_mode.h"
@@ -289,11 +292,35 @@ struct drm_radeon_master_private {
 #define RADEON_FLUSH_EMITED (1 < 0)
 #define RADEON_PURGE_EMITED (1 < 1)
 
+/* command submission struct */
+struct drm_radeon_cs_priv {
+    uint32_t id_wcnt;
+    uint32_t id_scnt;
+    uint32_t id_last_wcnt;
+    uint32_t id_last_scnt;
+
+    int (*parse)(struct drm_device *dev, struct drm_file *file_priv,
+                 void *ib, uint32_t *packets, uint32_t dwords);
+    void (*id_emit)(struct drm_device *dev, uint32_t *id);
+    uint32_t (*id_last_get)(struct drm_device *dev);
+    /* these ib handling callbacks hide the memory-manager drm from the
+     * memory-manager-less drm; free has to emit an ib discard sequence
+     * into the ring */
+    int (*ib_get)(struct drm_device *dev, void **ib, uint32_t dwords, uint32_t *card_offset);
+    uint32_t (*ib_get_ptr)(struct drm_device *dev, void *ib);
+    void (*ib_free)(struct drm_device *dev, void *ib, uint32_t dwords);
+    /* do a relocation either MM or non-MM */
+    int (*relocate)(struct drm_device *dev, struct drm_file *file_priv,
+                    uint32_t *reloc, uint32_t *offset);
+};
+
 typedef struct drm_radeon_private {
 
     drm_radeon_ring_buffer_t ring;
 
-    int new_memmap;
+    bool new_memmap;
+
+    bool user_mm_enable;
 
     int gart_size;
     u32 gart_vm_start;
@@ -372,6 +399,7 @@ typedef struct drm_radeon_private {
     uint32_t flags;     /* see radeon_chip_flags */
     unsigned long fb_aper_offset;
 
+    bool mm_enabled;
     struct radeon_mm_info mm;
     drm_local_map_t *mmio;
 
@@ -390,11 +418,20 @@ typedef struct drm_radeon_private {
     bool is_ddr;
     u32 ram_width;
 
+    uint32_t mc_fb_location;
+    uint32_t mc_agp_loc_lo;
+    uint32_t mc_agp_loc_hi;
+
     enum radeon_pll_errata pll_errata;
 
     int num_gb_pipes;
     int track_flush;
     uint32_t chip_family; /* extract from flags */
+
+    struct radeon_mm_obj **ib_objs;
+    /* ib bitmap */
+    uint64_t ib_alloc_bitmap; // TODO: replace with a real bitmap
+    struct drm_radeon_cs_priv cs;
 } drm_radeon_private_t;
 
 typedef struct drm_radeon_buf_priv {
@@ -672,14 +709,15 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
 #define RADEON_SCRATCH_REG3 0x15ec
 #define RADEON_SCRATCH_REG4 0x15f0
 #define RADEON_SCRATCH_REG5 0x15f4
+#define RADEON_SCRATCH_REG6 0x15f8
 #define RADEON_SCRATCH_UMSK 0x0770
 #define RADEON_SCRATCH_ADDR 0x0774
 
 #define RADEON_SCRATCHOFF( x )      (RADEON_SCRATCH_REG_OFFSET + 4*(x))
 
 #define GET_SCRATCH( x )    (dev_priv->writeback_works ? \
-    (dev_priv->mm.ring_read_ptr ? \
-     readl(dev_priv->mm.ring_read_ptr_map.virtual + RADEON_SCRATCHOFF(0)) : \
+    (dev_priv->mm.ring_read.bo ? \
+     readl(dev_priv->mm.ring_read.kmap.virtual + RADEON_SCRATCHOFF(x)) : \
     DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x))) : \
     RADEON_READ(RADEON_SCRATCH_REG0 + 4*(x)))
@@ -1243,44 +1281,62 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
 #define RADEON_READ8(reg)      DRM_READ8( dev_priv->mmio, (reg) )
 #define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
 
-extern int RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr);
+extern u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr);
 extern void RADEON_WRITE_PLL(struct drm_radeon_private *dev_priv, int addr, uint32_t data);
 
-#define RADEON_WRITE_PCIE( addr, val ) \
+#define RADEON_WRITE_P(reg, val, mask) \
+do { \
+    uint32_t tmp = RADEON_READ(reg); \
+    tmp &= (mask); \
+    tmp |= ((val) & ~(mask)); \
+    RADEON_WRITE(reg, tmp); \
+} while (0)
+
+#define RADEON_WRITE_PLL_P(dev_priv, addr, val, mask) \
+do { \
+    uint32_t tmp_ = RADEON_READ_PLL(dev_priv, addr); \
+    tmp_ &= (mask); \
+    tmp_ |= ((val) & ~(mask)); \
+    RADEON_WRITE_PLL(dev_priv, addr, tmp_); \
+} while (0)
+
+#define RADEON_WRITE_PCIE(addr, val) \
 do { \
-    RADEON_WRITE8( RADEON_PCIE_INDEX, \
+    RADEON_WRITE8(RADEON_PCIE_INDEX, \
                    ((addr) & 0xff)); \
-    RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
+    RADEON_WRITE(RADEON_PCIE_DATA, (val)); \
 } while (0)
 
-#define R500_WRITE_MCIND( addr, val ) \
+#define R500_WRITE_MCIND(addr, val) \
 do { \
     RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
     RADEON_WRITE(R520_MC_IND_DATA, (val)); \
     RADEON_WRITE(R520_MC_IND_INDEX, 0); \
 } while (0)
 
-#define RS480_WRITE_MCIND( addr, val ) \
+#define RS480_WRITE_MCIND(addr, val) \
 do { \
-    RADEON_WRITE( RS480_NB_MC_INDEX, \
+    RADEON_WRITE(RS480_NB_MC_INDEX, \
                   ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \
-    RADEON_WRITE( RS480_NB_MC_DATA, (val) ); \
-    RADEON_WRITE( RS480_NB_MC_INDEX, 0xff ); \
+    RADEON_WRITE(RS480_NB_MC_DATA, (val)); \
+    RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); \
 } while (0)
 
-#define RS690_WRITE_MCIND( addr, val ) \
+#define RS690_WRITE_MCIND(addr, val) \
 do { \
     RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
     RADEON_WRITE(RS690_MC_DATA, val); \
    RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
 } while (0)
 
-#define IGP_WRITE_MCIND( addr, val ) \
+#define IGP_WRITE_MCIND(addr, val) \
 do { \
-    if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
-        RS690_WRITE_MCIND( addr, val ); \
-    else \
-        RS480_WRITE_MCIND( addr, val ); \
+    if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
+        RS690_WRITE_MCIND(addr, val); \
+    else \
+        RS480_WRITE_MCIND(addr, val); \
 } while (0)
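Note that RADEON_WRITE_P and RADEON_WRITE_PLL_P above use an inverted mask convention: bits set in mask are preserved from the current register value, and val supplies the remaining bits. The same read-modify-write, sketched as a plain function for illustration (masked_write is hypothetical):

    #include <stdint.h>

    /* Bits set in mask are kept from cur; val provides the cleared bits. */
    static uint32_t masked_write(uint32_t cur, uint32_t val, uint32_t mask)
    {
        return (cur & mask) | (val & ~mask);
    }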
 
 #define CP_PACKET0( reg, n ) \
@@ -1324,42 +1380,42 @@ do { \
 
 #define RADEON_FLUSH_CACHE() do { \
     if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
-        OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
-        OUT_RING(RADEON_RB3D_DC_FLUSH); \
+        OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
+        OUT_RING(RADEON_RB3D_DC_FLUSH); \
     } else { \
-        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
-        OUT_RING(R300_RB3D_DC_FLUSH); \
-    } \
+        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
+        OUT_RING(RADEON_RB3D_DC_FLUSH); \
+    } \
 } while (0)
 
 #define RADEON_PURGE_CACHE() do { \
     if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
-        OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
-        OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
+        OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
+        OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
     } else { \
-        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
-        OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
-    } \
+        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
+        OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
+    } \
 } while (0)
 
 #define RADEON_FLUSH_ZCACHE() do { \
     if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
-        OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
-        OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
+        OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
+        OUT_RING(RADEON_RB3D_ZC_FLUSH); \
     } else { \
-        OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) ); \
-        OUT_RING( R300_ZC_FLUSH ); \
-    } \
+        OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
+        OUT_RING(R300_ZC_FLUSH); \
+    } \
 } while (0)
 
 #define RADEON_PURGE_ZCACHE() do { \
     if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
-        OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
-        OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
+        OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
+        OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
     } else { \
-        OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
-        OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
-    } \
+        OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
+        OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
+    } \
 } while (0)
 
 /* ================================================================
@@ -1380,7 +1436,7 @@ do { \
 #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
 do { \
     struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
-    drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
+    drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
     if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
         int __ret = radeon_do_cp_idle( dev_priv ); \
         if ( __ret ) return __ret; \
@@ -1556,6 +1612,23 @@ static inline int radeon_update_breadcrumb(struct drm_device *dev)
 
 #define radeon_is_dce3(dev_priv) ((dev_priv->chip_family >= CHIP_RV620))
 
+#define radeon_is_rv100(dev_priv) ((dev_priv->chip_family == CHIP_RV100) || \
+                                   (dev_priv->chip_family == CHIP_RV200) || \
+                                   (dev_priv->chip_family == CHIP_RS100) || \
+                                   (dev_priv->chip_family == CHIP_RS200) || \
+                                   (dev_priv->chip_family == CHIP_RV250) || \
+                                   (dev_priv->chip_family == CHIP_RV280) || \
+                                   (dev_priv->chip_family == CHIP_RS300))
+
+#define radeon_is_r300(dev_priv) ((dev_priv->chip_family == CHIP_R300) || \
+                                  (dev_priv->chip_family == CHIP_RV350) || \
+                                  (dev_priv->chip_family == CHIP_R350) || \
+                                  (dev_priv->chip_family == CHIP_RV380) || \
+                                  (dev_priv->chip_family == CHIP_R420) || \
+                                  (dev_priv->chip_family == CHIP_RV410) || \
+                                  (dev_priv->chip_family == CHIP_RS400) || \
+                                  (dev_priv->chip_family == CHIP_RS480))
+
 #define radeon_bios8(dev_priv, v) (dev_priv->bios[v])
 #define radeon_bios16(dev_priv, v) (dev_priv->bios[v] | (dev_priv->bios[(v) + 1] << 8))
 #define radeon_bios32(dev_priv, v) ((dev_priv->bios[v]) | \
@@ -1563,6 +1636,7 @@ static inline int radeon_update_breadcrumb(struct drm_device *dev)
                              (dev_priv->bios[(v) + 2] << 16) | \
                              (dev_priv->bios[(v) + 3] << 24))
 
+extern void radeon_pll_errata_after_index(struct drm_radeon_private *dev_priv);
 extern int radeon_emit_irq(struct drm_device * dev);
 
 extern void radeon_gem_free_object(struct drm_gem_object *obj);
@@ -1585,11 +1659,22 @@ int radeon_modeset_init(struct drm_device *dev);
 void radeon_modeset_cleanup(struct drm_device *dev);
 extern u32 radeon_read_mc_reg(drm_radeon_private_t *dev_priv, int addr);
 extern void radeon_write_mc_reg(drm_radeon_private_t *dev_priv, u32 addr, u32 val);
-
+void radeon_read_agp_location(drm_radeon_private_t *dev_priv, u32 *agp_lo, u32 *agp_hi);
+void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc);
 extern void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on);
 
 #define RADEONFB_CONN_LIMIT 4
 
 extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
 extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
 extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *master);
+extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
+extern int radeon_cs_init(struct drm_device *dev);
+void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master);
+
+#define MARK_SAFE           1
+#define MARK_CHECK_OFFSET   2
+#define MARK_CHECK_SCISSOR  3
+
+extern int r300_check_range(unsigned reg, int count);
+extern int r300_get_reg_flags(unsigned reg);
 
 #endif /* __RADEON_DRV_H__ */
diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c
index 6de4b135..7262b2aa 100644
--- a/shared-core/radeon_state.c
+++ b/shared-core/radeon_state.c
@@ -2223,6 +2223,9 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
     if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
         sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
 
+    if (dev_priv->mm.vram_offset)
+        radeon_gem_update_offsets(dev, file_priv->master);
+
     radeon_cp_dispatch_swap(dev, file_priv->master);
     sarea_priv->ctx_owner = 0;
 
@@ -3117,6 +3120,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
     case RADEON_PARAM_NUM_GB_PIPES:
         value = dev_priv->num_gb_pipes;
         break;
+    case RADEON_PARAM_KERNEL_MM:
+        value = dev_priv->mm_enabled;
+        break;
     default:
         DRM_DEBUG( "Invalid parameter %d\n", param->param );
         return -EINVAL;
@@ -3178,6 +3184,10 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
     case RADEON_SETPARAM_VBLANK_CRTC:
         return radeon_vblank_crtc_set(dev, sp->value);
         break;
+    case RADEON_SETPARAM_MM_INIT:
+        dev_priv->user_mm_enable = true;
+        dev_priv->new_memmap = true;
+        return radeon_gem_mm_init(dev);
     default:
         DRM_DEBUG("Invalid parameter %d\n", sp->param);
         return -EINVAL;
@@ -3279,6 +3289,7 @@ struct drm_ioctl_desc radeon_ioctls[] = {
     DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
     DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
     DRM_IOCTL_DEF(DRM_RADEON_GEM_INDIRECT, radeon_gem_indirect_ioctl, DRM_AUTH),
+    DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
 };
 
 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
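The RADEON_PARAM_KERNEL_MM getparam added above lets a client detect whether the kernel memory manager is active before choosing a code path. A hedged example of such a probe, assuming libdrm and the getparam definitions in radeon_drm.h (the function name is illustrative):

    #include "xf86drm.h"
    #include "radeon_drm.h"

    static int radeon_kernel_mm_enabled(int fd)
    {
        drm_radeon_getparam_t gp;
        int value = 0;

        gp.param = RADEON_PARAM_KERNEL_MM;
        gp.value = &value;
        if (drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp)))
            return 0; /* old kernel: parameter unknown, MM unavailable */
        return value;
    }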