author    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2006-10-03 12:08:07 +0200
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2006-10-03 12:08:07 +0200
commit    cee659afb56e7ac443402ac791144f391721061e (patch)
tree      45d68bf6a269f626f6a2178c8c0b5987af5c96e7
parent    16be6ba63a41f03e98a741464d3b51eefb277373 (diff)
Get rid of all ugly PTE hacks.
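
The patch removes the hand-rolled page-table walk in drm_compat.c
(drm_clear_vma() and its pgd/pud/pmd/pte helpers, together with the
per-kernel-version mm-counter and TLB-flush #ifdefs) and instead records
each device's struct address_space at open time in dev->dev_mapping.
TTM can then invalidate user mappings with the stock MM call
unmap_mapping_range(), as the rewritten unmap_vma_pages() does below.

As a minimal sketch of the new invalidation path (zap_object_range,
mapping, first_page and num_pages are illustrative names, not part of
this patch; only unmap_mapping_range() and the offset arithmetic mirror
the code in the diff):

	#include <linux/mm.h>

	/*
	 * Sketch only: punch a hole in every user mapping of the pages
	 * [first_page, first_page + num_pages) of an object, given the
	 * address_space saved at open() time (dev->dev_mapping in this
	 * patch).
	 */
	static void zap_object_range(struct address_space *mapping,
				     unsigned long first_page,
				     unsigned long num_pages)
	{
		loff_t offset  = (loff_t)first_page << PAGE_SHIFT;
		loff_t holelen = (loff_t)num_pages << PAGE_SHIFT;

		/* even_cows == 1: also zap private COW copies. */
		unmap_mapping_range(mapping, offset, holelen, 1);
	}

This trades roughly 130 lines of compat code for one exported MM call
and no longer needs every client's page_table_lock, which is why the
page_table argument to drm_ttm_lock_mm()/drm_ttm_unlock_mm() disappears.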
-rw-r--r--  linux-core/drmP.h       |   1
-rw-r--r--  linux-core/drm_compat.c | 133
-rw-r--r--  linux-core/drm_drv.c    |   1
-rw-r--r--  linux-core/drm_fops.c   |   7
-rw-r--r--  linux-core/drm_ttm.c    |  77
-rw-r--r--  linux-core/drm_ttm.h    |   1
6 files changed, 29 insertions(+), 191 deletions(-)
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index f17a3421..089059c8 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -874,6 +874,7 @@ typedef struct drm_device {
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
drm_open_hash_t object_hash; /**< User token hash table for objects */
+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
/** \name Context handle management */
/*@{ */
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 8dbc636a..81a2bd84 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -26,139 +26,6 @@
*/
#include "drmP.h"
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-#ifdef MODULE
-void pgd_clear_bad(pgd_t * pgd)
-{
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
-}
-
-void pud_clear_bad(pud_t * pud)
-{
- pud_ERROR(*pud);
- pud_clear(pud);
-}
-
-void pmd_clear_bad(pmd_t * pmd)
-{
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
-}
-#endif
-
-static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
- unsigned long addr, unsigned long end)
-{
- pte_t *pte;
- struct page *page;
- unsigned long pfn;
-
- pte = pte_offset_map(pmd, addr);
- do {
- if (pte_present(*pte)) {
- pte_t ptent;
- pfn = pte_pfn(*pte);
- ptent = *pte;
- ptep_get_and_clear(mm, addr, pte);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (atomic_add_negative(-1, &page->_mapcount)) {
- if (page_test_and_clear_dirty(page))
- set_page_dirty(page);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
- dec_zone_page_state(page, NR_FILE_MAPPED);
-#else
- dec_page_state(nr_mapped);
-#endif
- }
-
- put_page(page);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
- dec_mm_counter(mm, file_rss);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
- dec_mm_counter(mm, rss);
-#else
- --mm->rss;
-#endif
- }
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap(pte - 1);
-}
-
-static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud,
- unsigned long addr, unsigned long end)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_none_or_clear_bad(pmd))
- continue;
- change_pte_range(mm, pmd, addr, next);
- } while (pmd++, addr = next, addr != end);
-}
-
-static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd,
- unsigned long addr, unsigned long end)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- change_pmd_range(mm, pud, addr, next);
- } while (pud++, addr = next, addr != end);
-}
-
-/*
- * This function should be called with all relevant spinlocks held.
- */
-
-#if 1
-void drm_clear_vma(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgd;
- unsigned long next;
-#if defined(flush_tlb_mm) || !defined(MODULE)
- unsigned long start = addr;
-#endif
- BUG_ON(addr >= end);
- pgd = pgd_offset(mm, addr);
- flush_cache_range(vma, addr, end);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- change_pud_range(mm, pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-#if defined(flush_tlb_mm) || !defined(MODULE)
- flush_tlb_range(vma, addr, end);
-#endif
-}
-#else
-
-void drm_clear_vma(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- spin_unlock(&mm->page_table_lock);
- (void) zap_page_range(vma, addr, end - addr, NULL);
- spin_lock(&mm->page_table_lock);
-}
-#endif
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
int drm_map_page_into_agp(struct page *page)
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index ae0c37a5..4cbe035f 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -263,6 +263,7 @@ int drm_lastclose(drm_device_t * dev)
dev->lock.filp = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
+ dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
if (drm_bo_clean_mm(dev)) {
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 5593e55c..b60ced34 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -158,6 +158,12 @@ int drm_open(struct inode *inode, struct file *filp)
}
spin_unlock(&dev->count_lock);
}
+ mutex_lock(&dev->struct_mutex);
+ BUG_ON((dev->dev_mapping != NULL) &&
+ (dev->dev_mapping != inode->i_mapping));
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ mutex_unlock(&dev->struct_mutex);
return retcode;
}
@@ -465,6 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
+
if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
drm_ctx_list_t *pos, *n;
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 311c57fa..ed50da90 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -123,31 +123,12 @@ void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm)
BUG_ON(1);
}
-static void drm_ttm_lock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
+static void drm_ttm_unlock_mm(drm_ttm_t * ttm)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
- if (mm_sem) {
- down_write(&entry->mm->mmap_sem);
- }
- if (page_table) {
- spin_lock(&entry->mm->page_table_lock);
- }
- }
-}
-
-static void drm_ttm_unlock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
-{
- p_mm_entry_t *entry;
-
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
- if (page_table) {
- spin_unlock(&entry->mm->page_table_lock);
- }
- if (mm_sem) {
- up_write(&entry->mm->mmap_sem);
- }
+ up_write(&entry->mm->mmap_sem);
}
}
@@ -180,30 +161,13 @@ static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long num_pages)
{
- struct list_head *list;
-
-#if !defined(flush_tlb_mm) && defined(MODULE)
- int flush_tlb = 0;
-#endif
- list_for_each(list, &ttm->vma_list->head) {
- drm_ttm_vma_list_t *entry =
- list_entry(list, drm_ttm_vma_list_t, head);
-
- drm_clear_vma(entry->vma,
- entry->vma->vm_start +
- (page_offset << PAGE_SHIFT),
- entry->vma->vm_start +
- ((page_offset + num_pages) << PAGE_SHIFT));
-
-#if !defined(flush_tlb_mm) && defined(MODULE)
- flush_tlb = 1;
-#endif
- }
-#if !defined(flush_tlb_mm) && defined(MODULE)
- if (flush_tlb)
- global_flush_tlb();
-#endif
+ drm_device_t *dev = ttm->dev;
+ loff_t offset = ((loff_t) ttm->mapping_offset + page_offset)
+ << PAGE_SHIFT;
+ loff_t holelen = num_pages << PAGE_SHIFT;
+
+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
return 0;
}
@@ -437,15 +401,16 @@ static int drm_ttm_lock_mmap_sem(drm_ttm_t * ttm)
}
/*
- * Change caching policy for range of pages in a ttm.
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
*/
static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
- unsigned long num_pages, int noncached,
- int do_tlbflush)
+ unsigned long num_pages, int noncached)
{
int i, cur;
struct page **cur_page;
+ int do_tlbflush = 0;
for (i = 0; i < num_pages; ++i) {
cur = page_offset + i;
@@ -467,6 +432,7 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
} else {
unmap_page_from_agp(*cur_page);
}
+ do_tlbflush = 1;
}
}
}
@@ -492,16 +458,14 @@ int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
ret = drm_ttm_lock_mmap_sem(ttm);
if (ret)
return ret;
- drm_ttm_lock_mm(ttm, 0, 1);
unmap_vma_pages(ttm, entry->page_offset,
entry->num_pages);
- drm_ttm_unlock_mm(ttm, 0, 1);
}
be->unbind(entry->be);
if (ttm && be->needs_cache_adjust(be)) {
drm_set_caching(ttm, entry->page_offset,
- entry->num_pages, 0, 1);
- drm_ttm_unlock_mm(ttm, 1, 0);
+ entry->num_pages, 0);
+ drm_ttm_unlock_mm(ttm);
}
break;
default:
@@ -653,20 +617,17 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
if (ret)
return ret;
- drm_ttm_lock_mm(ttm, 0, 1);
unmap_vma_pages(ttm, region->page_offset,
region->num_pages);
- drm_ttm_unlock_mm(ttm, 0, 1);
-
drm_set_caching(ttm, region->page_offset, region->num_pages,
- DRM_TTM_PAGE_UNCACHED, 1);
+ DRM_TTM_PAGE_UNCACHED);
} else {
DRM_DEBUG("Binding cached\n");
}
if ((ret = be->bind(be, aper_offset))) {
if (ttm && be->needs_cache_adjust(be))
- drm_ttm_unlock_mm(ttm, 1, 0);
+ drm_ttm_unlock_mm(ttm);
drm_unbind_ttm_region(region);
DRM_ERROR("Couldn't bind backend.\n");
return ret;
@@ -682,7 +643,7 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
if (ttm && be->needs_cache_adjust(be)) {
ioremap_vmas(ttm, region->page_offset, region->num_pages,
aper_offset);
- drm_ttm_unlock_mm(ttm, 1, 0);
+ drm_ttm_unlock_mm(ttm);
}
region->state = ttm_bound;
@@ -924,7 +885,7 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
}
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
-
+ ttm->mapping_offset = list->hash.key;
atomic_set(&object->usage, 1);
*ttm_object = object;
return 0;
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index d65b17de..53afe792 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -100,6 +100,7 @@ typedef struct drm_ttm {
atomic_t vma_count;
int mmap_sem_locked;
int destroy;
+ uint32_t mapping_offset;
} drm_ttm_t;
typedef struct drm_ttm_object {