From 1c787f0d396c309131d5f34939598d657ee2459f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 21 Aug 2006 20:38:57 +0200 Subject: Backwards compatibility code for ttms. --- linux-core/drm_compat.c | 140 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 linux-core/drm_compat.c (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c new file mode 100644 index 00000000..cdef4b97 --- /dev/null +++ b/linux-core/drm_compat.c @@ -0,0 +1,140 @@ +/************************************************************************** + * + * This kernel module is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + **************************************************************************/ +/* + * This code provides access to unexported mm kernel features. It is necessary + * to use the new DRM memory manager code with kernels that don't support it + * directly. + * + * Authors: Thomas Hellstrom + * Linux kernel mm subsystem authors. + * (Most code taken from there). + */ + +#include "drmP.h" +#include +#include +#include + +#ifdef MODULE +void pgd_clear_bad(pgd_t * pgd) +{ + pgd_ERROR(*pgd); + pgd_clear(pgd); +} + +void pud_clear_bad(pud_t * pud) +{ + pud_ERROR(*pud); + pud_clear(pud); +} + +void pmd_clear_bad(pmd_t * pmd) +{ + pmd_ERROR(*pmd); + pmd_clear(pmd); +} +#endif + +static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd, + unsigned long addr, unsigned long end) +{ + pte_t *pte; + + pte = pte_offset_map(pmd, addr); + do { + if (pte_present(*pte)) { + pte_t ptent; + ptent = *pte; + ptep_get_and_clear(mm, addr, pte); + lazy_mmu_prot_update(ptent); + } + } while (pte++, addr += PAGE_SIZE, addr != end); + pte_unmap(pte - 1); +} + +static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud, + unsigned long addr, unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) + continue; + change_pte_range(mm, pmd, addr, next); + } while (pmd++, addr = next, addr != end); +} + +static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd, + unsigned long addr, unsigned long end) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + change_pmd_range(mm, pud, addr, next); + } while (pud++, addr = next, addr != end); +} + +/* + * This function should be called with all relevant spinlocks held. 
+ */ + +void drm_clear_vma(struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + pgd_t *pgd; + unsigned long next; +#if defined(flush_tlb_mm) || !defined(MODULE) + unsigned long start = addr; +#endif + BUG_ON(addr >= end); + pgd = pgd_offset(mm, addr); + flush_cache_range(vma, addr, end); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + change_pud_range(mm, pgd, addr, next); + } while (pgd++, addr = next, addr != end); +#if defined(flush_tlb_mm) || !defined(MODULE) + flush_tlb_range(vma, addr, end); +#endif +} + +pgprot_t drm_prot_map(uint32_t flags) +{ +#ifdef MODULE + static pgprot_t drm_protection_map[16] = { + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 + }; + + return drm_protection_map[flags & 0x0F]; +#else + extern pgprot_t protection_map[]; + return protection_map[flags & 0x0F]; +#endif +}; -- cgit v1.2.3 From 1d3cf107d20cb11ad07667622785ef8341ab9c2a Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 25 Aug 2006 18:14:22 +0200 Subject: Module protection map access is moving into mainline kernels. Update drm_compat accordingly. (Reported by Dave Airlie) --- linux-core/drm_compat.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index cdef4b97..86bae306 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -124,7 +124,7 @@ void drm_clear_vma(struct vm_area_struct *vma, #endif } -pgprot_t drm_prot_map(uint32_t flags) +pgprot_t vm_get_page_prot(unsigned long vm_flags) { #ifdef MODULE static pgprot_t drm_protection_map[16] = { @@ -132,9 +132,9 @@ pgprot_t drm_prot_map(uint32_t flags) __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; - return drm_protection_map[flags & 0x0F]; + return drm_protection_map[vm_flags & 0x0F]; #else extern pgprot_t protection_map[]; - return protection_map[flags & 0x0F]; + return protection_map[vm_flags & 0x0F]; #endif }; -- cgit v1.2.3 From 99acb7936660843090ea8a9f22d2d50d9433e0de Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 8 Sep 2006 17:24:38 +0200 Subject: Various bugfixes. --- linux-core/drm_compat.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 86bae306..162e4656 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -59,9 +59,14 @@ static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd, do { if (pte_present(*pte)) { pte_t ptent; - ptent = *pte; ptep_get_and_clear(mm, addr, pte); + ptent = *pte; lazy_mmu_prot_update(ptent); + } else { + ptep_get_and_clear(mm, addr, pte); + } + if (!pte_none(*pte)) { + DRM_ERROR("Ugh. Pte was presen\n"); } } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap(pte - 1); -- cgit v1.2.3 From 682c6ed0293771b093452597540118f47fda1adf Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 14 Sep 2006 12:17:38 +0200 Subject: Remove the use of reserved pages, and use locked pages instead. Update compatibility for latest linux versions. 
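[Annotation] The compatibility part of this change is one idiom applied repeatedly: branch at compile time on LINUX_VERSION_CODE, because the mm accounting interfaces changed shape across 2.6.12, 2.6.15 and 2.6.18. A minimal sketch of the idiom as the hunk below uses it; drm_dec_rss() is a hypothetical name for illustration only, the patch itself open-codes the same ladder inside change_pte_range():

	#include <linux/version.h>
	#include <linux/sched.h>	/* mm_struct and the rss accessors in this era */

	static inline void drm_dec_rss(struct mm_struct *mm)
	{
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
		dec_mm_counter(mm, file_rss);	/* rss split into file/anon counters */
	#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
		dec_mm_counter(mm, rss);	/* single counter behind an accessor macro */
	#else
		--mm->rss;			/* plain struct member on older kernels */
	#endif
	}
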
--- linux-core/drm_compat.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 162e4656..d387678e 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -54,19 +54,37 @@ static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long addr, unsigned long end) { pte_t *pte; + struct page *page; + unsigned long pfn; pte = pte_offset_map(pmd, addr); do { if (pte_present(*pte)) { pte_t ptent; - ptep_get_and_clear(mm, addr, pte); + pfn = pte_pfn(*pte); ptent = *pte; - lazy_mmu_prot_update(ptent); - } else { ptep_get_and_clear(mm, addr, pte); - } - if (!pte_none(*pte)) { - DRM_ERROR("Ugh. Pte was presen\n"); + if (pfn_valid(pfn)) { + page = pfn_to_page(pfn); + if (atomic_add_negative(-1, &page->_mapcount)) { + if (page_test_and_clear_dirty(page)) + set_page_dirty(page); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + dec_zone_page_state(page, NR_FILE_MAPPED); +#else + dec_page_state(nr_mapped); +#endif + } + + put_page(page); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) + dec_mm_counter(mm, file_rss); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) + dec_mm_counter(mm, rss); +#else + --mm->rss; +#endif + } } } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap(pte - 1); -- cgit v1.2.3 From ca1b15d645c74e20f638f5a09981bcf02f58caee Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 18 Sep 2006 20:43:31 +0200 Subject: Alternative implementation of page table zeroing using zap page_range. (Disabled for now) Fix bo_wait_idle bug. Remove stray debug message. --- linux-core/drm_compat.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index d387678e..e56f6608 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -124,6 +124,7 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd, * This function should be called with all relevant spinlocks held. */ +#if 1 void drm_clear_vma(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { @@ -146,6 +147,19 @@ void drm_clear_vma(struct vm_area_struct *vma, flush_tlb_range(vma, addr, end); #endif } +#else + +void drm_clear_vma(struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + + spin_unlock(&mm->page_table_lock); + (void) zap_page_range(vma, addr, end - addr, NULL); + spin_lock(&mm->page_table_lock); +} +#endif + pgprot_t vm_get_page_prot(unsigned long vm_flags) { -- cgit v1.2.3 From 235f6fc650e9974211843b9196a903963dae0211 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 27 Sep 2006 09:27:31 +0200 Subject: Adapt to architecture-specific hooks for gatt pages. 
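[Annotation] The hooks added below deliberately leave TLB flushing to the caller: change_page_attr() is invoked once per page, while global_flush_tlb() is expensive and can be amortized over a whole batch, which is exactly what the in-code comments ask for. A hypothetical caller (npages and pages[] are illustrative, not names from this tree):

	int i;

	for (i = 0; i < npages; ++i)
		drm_map_page_into_agp(pages[i]);	/* change_page_attr() per page */
	global_flush_tlb();				/* one flush for the whole batch */
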
--- linux-core/drm_compat.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index e56f6608..8dbc636a 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -160,6 +160,26 @@ void drm_clear_vma(struct vm_area_struct *vma, } #endif +#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +int drm_map_page_into_agp(struct page *page) +{ + int i; + i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); + /* Caller's responsibility to call global_flush_tlb() for + * performance reasons */ + return i; +} + +int drm_unmap_page_from_agp(struct page *page) +{ + int i; + i = change_page_attr(page, 1, PAGE_KERNEL); + /* Caller's responsibility to call global_flush_tlb() for + * performance reasons */ + return i; +} +#endif + pgprot_t vm_get_page_prot(unsigned long vm_flags) { -- cgit v1.2.3 From cee659afb56e7ac443402ac791144f391721061e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 3 Oct 2006 12:08:07 +0200 Subject: Get rid of all ugly PTE hacks. --- linux-core/drm_compat.c | 133 ------------------------------------------------ 1 file changed, 133 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 8dbc636a..81a2bd84 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -26,139 +26,6 @@ */ #include "drmP.h" -#include -#include -#include - -#ifdef MODULE -void pgd_clear_bad(pgd_t * pgd) -{ - pgd_ERROR(*pgd); - pgd_clear(pgd); -} - -void pud_clear_bad(pud_t * pud) -{ - pud_ERROR(*pud); - pud_clear(pud); -} - -void pmd_clear_bad(pmd_t * pmd) -{ - pmd_ERROR(*pmd); - pmd_clear(pmd); -} -#endif - -static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd, - unsigned long addr, unsigned long end) -{ - pte_t *pte; - struct page *page; - unsigned long pfn; - - pte = pte_offset_map(pmd, addr); - do { - if (pte_present(*pte)) { - pte_t ptent; - pfn = pte_pfn(*pte); - ptent = *pte; - ptep_get_and_clear(mm, addr, pte); - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - if (atomic_add_negative(-1, &page->_mapcount)) { - if (page_test_and_clear_dirty(page)) - set_page_dirty(page); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) - dec_zone_page_state(page, NR_FILE_MAPPED); -#else - dec_page_state(nr_mapped); -#endif - } - - put_page(page); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) - dec_mm_counter(mm, file_rss); -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) - dec_mm_counter(mm, rss); -#else - --mm->rss; -#endif - } - } - } while (pte++, addr += PAGE_SIZE, addr != end); - pte_unmap(pte - 1); -} - -static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud, - unsigned long addr, unsigned long end) -{ - pmd_t *pmd; - unsigned long next; - - pmd = pmd_offset(pud, addr); - do { - next = pmd_addr_end(addr, end); - if (pmd_none_or_clear_bad(pmd)) - continue; - change_pte_range(mm, pmd, addr, next); - } while (pmd++, addr = next, addr != end); -} - -static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd, - unsigned long addr, unsigned long end) -{ - pud_t *pud; - unsigned long next; - - pud = pud_offset(pgd, addr); - do { - next = pud_addr_end(addr, end); - if (pud_none_or_clear_bad(pud)) - continue; - change_pmd_range(mm, pud, addr, next); - } while (pud++, addr = next, addr != end); -} - -/* - * This function should be called with all relevant spinlocks held. 
- */ - -#if 1 -void drm_clear_vma(struct vm_area_struct *vma, - unsigned long addr, unsigned long end) -{ - struct mm_struct *mm = vma->vm_mm; - pgd_t *pgd; - unsigned long next; -#if defined(flush_tlb_mm) || !defined(MODULE) - unsigned long start = addr; -#endif - BUG_ON(addr >= end); - pgd = pgd_offset(mm, addr); - flush_cache_range(vma, addr, end); - do { - next = pgd_addr_end(addr, end); - if (pgd_none_or_clear_bad(pgd)) - continue; - change_pud_range(mm, pgd, addr, next); - } while (pgd++, addr = next, addr != end); -#if defined(flush_tlb_mm) || !defined(MODULE) - flush_tlb_range(vma, addr, end); -#endif -} -#else - -void drm_clear_vma(struct vm_area_struct *vma, - unsigned long addr, unsigned long end) -{ - struct mm_struct *mm = vma->vm_mm; - - spin_unlock(&mm->page_table_lock); - (void) zap_page_range(vma, addr, end - addr, NULL); - spin_lock(&mm->page_table_lock); -} -#endif #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) int drm_map_page_into_agp(struct page *page) -- cgit v1.2.3 From c58574c60505a699e19e1ed59e1b441be2594e53 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 10 Oct 2006 10:37:26 +0200 Subject: Use a nopage-based approach to fault in pfns. --- linux-core/drm_compat.c | 79 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 81a2bd84..2b449e90 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -62,3 +62,82 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) return protection_map[vm_flags & 0x0F]; #endif }; + +int drm_pte_is_clear(struct vm_area_struct *vma, + unsigned long addr) +{ + struct mm_struct *mm = vma->vm_mm; + int ret = 1; + pte_t *pte; + pmd_t *pmd; + pud_t *pud; + pgd_t *pgd; + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + spin_lock(&mm->page_table_lock); +#else + spinlock_t ptl; +#endif + + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) + goto unlock; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + goto unlock; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + goto unlock; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + pte = pte_offset_map(pmd, addr); +#else + pte = pte_offset_map_lock(mm, pmd, addr, &ptl); +#endif + if (!pte) + goto unlock; + ret = pte_none(*pte); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + pte_unmap(pte); + unlock: + spin_unlock(&mm->page_table_lock); +#else + pte_unmap_unlock(pte, ptl); + unlock: +#endif + return ret; +} + + +static struct { + spinlock_t lock; + struct page *dummy_page; + atomic_t present; +} drm_np_retry = +{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)}; + +struct page * get_nopage_retry(void) +{ + if (atomic_read(&drm_np_retry.present) == 0) { + struct page *page = alloc_page(GFP_KERNEL); + if (!page) + return NOPAGE_OOM; + spin_lock(&drm_np_retry.lock); + drm_np_retry.dummy_page = page; + atomic_set(&drm_np_retry.present,1); + spin_unlock(&drm_np_retry.lock); + } + get_page(drm_np_retry.dummy_page); + return drm_np_retry.dummy_page; +} + +void free_nopage_retry(void) +{ + if (atomic_read(&drm_np_retry.present) == 1) { + spin_lock(&drm_np_retry.lock); + __free_page(drm_np_retry.dummy_page); + drm_np_retry.dummy_page = NULL; + atomic_set(&drm_np_retry.present, 0); + spin_unlock(&drm_np_retry.lock); + } +} -- cgit v1.2.3 From f2db76e2f206d2017f710eaddc4b33add4498898 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 11 Oct 2006 13:40:35 +0200 Subject: Big update: Adapt for new 
functions in the 2.6.19 kernel. Remove the ability to have multiple regions in one TTM. This simplifies a lot of code. Remove the ability to access TTMs from user space. We don't need it anymore without ttm regions. Don't change caching policy for evicted buffers. Instead change it only when the buffer is accessed by the CPU (on the first page fault). This tremendously speeds up eviction rates. Current code is safe for kernels <= 2.6.14. Should also be OK with 2.6.19 and above. --- linux-core/drm_compat.c | 50 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 4 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 2b449e90..1aa835ca 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -63,8 +63,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) #endif }; -int drm_pte_is_clear(struct vm_area_struct *vma, - unsigned long addr) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + +static int drm_pte_is_clear(struct vm_area_struct *vma, + unsigned long addr) { struct mm_struct *mm = vma->vm_mm; int ret = 1; @@ -77,7 +79,7 @@ int drm_pte_is_clear(struct vm_area_struct *vma, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) spin_lock(&mm->page_table_lock); #else - spinlock_t ptl; + spinlock_t *ptl; #endif pgd = pgd_offset(mm, addr); @@ -92,7 +94,7 @@ int drm_pte_is_clear(struct vm_area_struct *vma, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) pte = pte_offset_map(pmd, addr); #else - pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + pte = pte_offset_map_lock(mm, pmd, addr, &ptl); #endif if (!pte) goto unlock; @@ -108,6 +110,17 @@ int drm_pte_is_clear(struct vm_area_struct *vma, return ret; } +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t pgprot) +{ + int ret; + if (!drm_pte_is_clear(vma, addr)) + return -EBUSY; + + ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot); + return ret; +} + static struct { spinlock_t lock; @@ -141,3 +154,32 @@ void free_nopage_retry(void) spin_unlock(&drm_np_retry.lock); } } +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + +struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, + unsigned long address, + int *type) +{ + struct fault_data data; + + if (type) + *type = VM_FAULT_MINOR; + + data.address = address; + data.vma = vma; + drm_vm_ttm_fault(vma, &data); + switch (data.type) { + case VM_FAULT_OOM: + return NOPAGE_OOM; + case VM_FAULT_SIGBUS: + return NOPAGE_SIGBUS; + default: + break; + } + + return NOPAGE_REFAULT; +} + +#endif -- cgit v1.2.3 From 30703893674b3da5b862dee2acd6efca13424398 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 11 Oct 2006 22:21:01 +0200 Subject: Compatibility code for 2.6.15-2.6.18. It is ugly but a little comfort is that it will go away in the mainstream kernel. Some bugfixes, mainly in error paths. 
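[Annotation] The heart of the workaround is drm_ttm_lock_mm(), which write-trylocks the mmap_sem of every mm mapping the ttm and backs everything out with -EAGAIN if a single trylock fails. Callers are expected to drop their own locks and restart; a hedged sketch of such a caller, assuming dev->struct_mutex is the lock taken first (as the DRM_ODD_MM_COMPAT comment later in this series spells out) and not a verbatim call site:

	while (drm_ttm_lock_mm(ttm) == -EAGAIN) {
		mutex_unlock(&dev->struct_mutex);	/* back out completely */
		schedule();				/* release the cpu, then retry */
		mutex_lock(&dev->struct_mutex);
	}
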
--- linux-core/drm_compat.c | 236 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 236 insertions(+) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 1aa835ca..5287614d 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -183,3 +183,239 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, } #endif + +#ifdef DRM_ODD_MM_COMPAT + +typedef struct p_mm_entry { + struct list_head head; + struct mm_struct *mm; + atomic_t refcount; + int locked; +} p_mm_entry_t; + +typedef struct vma_entry { + struct list_head head; + struct vm_area_struct *vma; +} vma_entry_t; + + +struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, + unsigned long address, + int *type) +{ + drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; + unsigned long page_offset; + struct page *page; + drm_ttm_t *ttm; + drm_buffer_manager_t *bm; + drm_device_t *dev; + + /* + * FIXME: Check can't map aperture flag. + */ + + if (type) + *type = VM_FAULT_MINOR; + + if (!map) + return NOPAGE_OOM; + + if (address > vma->vm_end) + return NOPAGE_SIGBUS; + + ttm = (drm_ttm_t *) map->offset; + dev = ttm->dev; + mutex_lock(&dev->struct_mutex); + drm_fixup_ttm_caching(ttm); + BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED); + + bm = &dev->bm; + page_offset = (address - vma->vm_start) >> PAGE_SHIFT; + page = ttm->pages[page_offset]; + + if (!page) { + if (bm->cur_pages >= bm->max_pages) { + DRM_ERROR("Maximum locked page count exceeded\n"); + page = NOPAGE_OOM; + goto out; + } + page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0); + if (!page) { + page = NOPAGE_OOM; + goto out; + } + ++bm->cur_pages; + SetPageLocked(page); + } + + get_page(page); + out: + mutex_unlock(&dev->struct_mutex); + return page; +} + + + + +int drm_ttm_map_bound(struct vm_area_struct *vma) +{ + drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; + drm_ttm_t *ttm = (drm_ttm_t *) map->offset; + int ret = 0; + + if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) { + unsigned long pfn = ttm->aper_offset + + (ttm->be->aperture_base >> PAGE_SHIFT); + pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma); + + ret = io_remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, + pgprot); + } + return ret; +} + + +int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) +{ + p_mm_entry_t *entry, *n_entry; + vma_entry_t *v_entry; + drm_local_map_t *map = (drm_local_map_t *) + vma->vm_private_data; + struct mm_struct *mm = vma->vm_mm; + + v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM); + if (!v_entry) { + DRM_ERROR("Allocation of vma pointer entry failed\n"); + return -ENOMEM; + } + v_entry->vma = vma; + map->handle = (void *) v_entry; + list_add_tail(&v_entry->head, &ttm->vma_list); + + list_for_each_entry(entry, &ttm->p_mm_list, head) { + if (mm == entry->mm) { + atomic_inc(&entry->refcount); + return 0; + } else if ((unsigned long)mm < (unsigned long)entry->mm) ; + } + + n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM); + if (!n_entry) { + DRM_ERROR("Allocation of process mm pointer entry failed\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&n_entry->head); + n_entry->mm = mm; + n_entry->locked = 0; + atomic_set(&n_entry->refcount, 0); + list_add_tail(&n_entry->head, &entry->head); + + return 0; +} + +void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) +{ + p_mm_entry_t *entry, *n; + vma_entry_t *v_entry, *v_n; + int found = 0; + struct mm_struct *mm = vma->vm_mm; + + list_for_each_entry_safe(v_entry, v_n, 
&ttm->vma_list, head) { + if (v_entry->vma == vma) { + found = 1; + list_del(&v_entry->head); + drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM); + break; + } + } + BUG_ON(!found); + + list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) { + if (mm == entry->mm) { + if (atomic_add_negative(-1, &entry->refcount)) { + list_del(&entry->head); + BUG_ON(entry->locked); + drm_free(entry, sizeof(*entry), DRM_MEM_TTM); + } + return; + } + } + BUG_ON(1); +} + + + +int drm_ttm_lock_mm(drm_ttm_t * ttm) +{ + p_mm_entry_t *entry; + int lock_ok = 1; + + list_for_each_entry(entry, &ttm->p_mm_list, head) { + BUG_ON(entry->locked); + if (!down_write_trylock(&entry->mm->mmap_sem)) { + lock_ok = 0; + break; + } + entry->locked = 1; + } + + if (lock_ok) + return 0; + + list_for_each_entry(entry, &ttm->p_mm_list, head) { + if (!entry->locked) + break; + up_write(&entry->mm->mmap_sem); + entry->locked = 0; + } + + /* + * Possible deadlock. Try again. Our callers should handle this + * and restart. + */ + + return -EAGAIN; +} + +void drm_ttm_unlock_mm(drm_ttm_t * ttm) +{ + p_mm_entry_t *entry; + + list_for_each_entry(entry, &ttm->p_mm_list, head) { + BUG_ON(!entry->locked); + up_write(&entry->mm->mmap_sem); + entry->locked = 0; + } +} + +int drm_ttm_remap_bound(drm_ttm_t *ttm) +{ + vma_entry_t *v_entry; + int ret = 0; + + list_for_each_entry(v_entry, &ttm->vma_list, head) { + ret = drm_ttm_map_bound(v_entry->vma); + if (ret) + break; + } + + drm_ttm_unlock_mm(ttm); + return ret; +} + +void drm_ttm_finish_unmap(drm_ttm_t *ttm) +{ + vma_entry_t *v_entry; + + if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) + return; + + list_for_each_entry(v_entry, &ttm->vma_list, head) { + v_entry->vma->vm_flags &= ~VM_PFNMAP; + } + drm_ttm_unlock_mm(ttm); +} + +#endif + -- cgit v1.2.3 From d515936ea7f98f6aaa9217699796beadef9d664b Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 17 Oct 2006 19:40:57 +0200 Subject: Add memory usage accounting to avoid DOS problems. 
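[Annotation] The accounting pattern is the same at every allocation site: reserve against the global memctl budget before allocating, and roll the reservation back if the real allocation fails. Condensed from the nopage hunk below (the goto-based error path is flattened into returns for brevity):

	if (drm_alloc_memctl(PAGE_SIZE))	/* would exceed the allowed budget */
		return NOPAGE_OOM;
	page = drm_alloc_gatt_pages(0);
	if (!page) {
		drm_free_memctl(PAGE_SIZE);	/* undo the reservation */
		return NOPAGE_OOM;
	}
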
--- linux-core/drm_compat.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 5287614d..4a035f49 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -239,8 +239,13 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, page = NOPAGE_OOM; goto out; } + if (drm_alloc_memctl(PAGE_SIZE)) { + page = NOPAGE_OOM; + goto out; + } page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0); if (!page) { + drm_free_memctl(PAGE_SIZE); page = NOPAGE_OOM; goto out; } @@ -284,7 +289,7 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) vma->vm_private_data; struct mm_struct *mm = vma->vm_mm; - v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM); + v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM); if (!v_entry) { DRM_ERROR("Allocation of vma pointer entry failed\n"); return -ENOMEM; @@ -300,7 +305,7 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) } else if ((unsigned long)mm < (unsigned long)entry->mm) ; } - n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM); + n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM); if (!n_entry) { DRM_ERROR("Allocation of process mm pointer entry failed\n"); return -ENOMEM; @@ -325,7 +330,7 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) if (v_entry->vma == vma) { found = 1; list_del(&v_entry->head); - drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM); + drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM); break; } } @@ -336,7 +341,7 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) if (atomic_add_negative(-1, &entry->refcount)) { list_del(&entry->head); BUG_ON(entry->locked); - drm_free(entry, sizeof(*entry), DRM_MEM_TTM); + drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM); } return; } -- cgit v1.2.3 From c34faf224b959bf61e4c3eb29c66a12edbd31841 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 17 Oct 2006 20:03:26 +0200 Subject: Remove max number of locked pages check and call, since that is now handled by the memory accounting. --- linux-core/drm_compat.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 4a035f49..90e53419 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -234,11 +234,6 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, page = ttm->pages[page_offset]; if (!page) { - if (bm->cur_pages >= bm->max_pages) { - DRM_ERROR("Maximum locked page count exceeded\n"); - page = NOPAGE_OOM; - goto out; - } if (drm_alloc_memctl(PAGE_SIZE)) { page = NOPAGE_OOM; goto out; -- cgit v1.2.3 From 3624e43282b0c6aad32829f116fd8f7bce66fbb6 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 20 Oct 2006 15:06:31 +0200 Subject: Bug #8707, 2.6.19-rc compatibility for memory manager code. --- linux-core/drm_compat.c | 53 ++++++++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 20 deletions(-) (limited to 'linux-core/drm_compat.c') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 90e53419..b466f8bd 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -28,6 +28,11 @@ #include "drmP.h" #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + +/* + * These have bad performance in the AGP module for the indicated kernel versions. 
+ */ + int drm_map_page_into_agp(struct page *page) { int i; @@ -45,8 +50,14 @@ int drm_unmap_page_from_agp(struct page *page) * performance reasons */ return i; } -#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + +/* + * The protection map was exported in 2.6.19 + */ pgprot_t vm_get_page_prot(unsigned long vm_flags) { @@ -62,8 +73,17 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) return protection_map[vm_flags & 0x0F]; #endif }; +#endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + +/* + * vm code for kernels below 2,6,15 in which version a major vm write + * occured. This implement a simple straightforward + * version similar to what's going to be + * in kernel 2.6.20+? + */ static int drm_pte_is_clear(struct vm_area_struct *vma, unsigned long addr) @@ -76,12 +96,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma, pgd_t *pgd; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) spin_lock(&mm->page_table_lock); -#else - spinlock_t *ptl; -#endif - pgd = pgd_offset(mm, addr); if (pgd_none(*pgd)) goto unlock; @@ -91,22 +106,13 @@ static int drm_pte_is_clear(struct vm_area_struct *vma, pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) goto unlock; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) pte = pte_offset_map(pmd, addr); -#else - pte = pte_offset_map_lock(mm, pmd, addr, &ptl); -#endif if (!pte) goto unlock; ret = pte_none(*pte); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) pte_unmap(pte); unlock: spin_unlock(&mm->page_table_lock); -#else - pte_unmap_unlock(pte, ptl); - unlock: -#endif return ret; } @@ -121,7 +127,6 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, return ret; } - static struct { spinlock_t lock; struct page *dummy_page; @@ -154,9 +159,6 @@ void free_nopage_retry(void) spin_unlock(&drm_np_retry.lock); } } -#endif - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, unsigned long address, @@ -186,6 +188,17 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, #ifdef DRM_ODD_MM_COMPAT +/* + * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated + * workaround for a single BUG statement in do_no_page in these versions. The + * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_ + * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to + * check first take the dev->struct_mutex, and then trylock all mmap_sems. If this + * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex, + * release the cpu and retry. We also need to keep track of all vmas mapping the ttm. + * phew. + */ + typedef struct p_mm_entry { struct list_head head; struct mm_struct *mm; -- cgit v1.2.3
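[Annotation] A closing note on vm_insert_pfn() as this series leaves it: the compat version takes an explicit pgprot, maps exactly one page via io_remap_pfn_range(), and returns -EBUSY when drm_pte_is_clear() finds the pte already populated. A hypothetical fault-path caller would treat that as a benign race with a concurrent fault; the call below is an assumption about such a caller, not code from this tree:

	int ret;

	ret = vm_insert_pfn(vma, address & PAGE_MASK, pfn, vma->vm_page_prot);
	if (ret == -EBUSY)
		ret = 0;	/* assumed: a racing fault already filled the pte */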