author    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2006-09-27 09:27:31 +0200
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2006-09-27 09:27:31 +0200
commit    235f6fc650e9974211843b9196a903963dae0211 (patch)
tree      c1665e8232044685f7c5e023c0db55ddca6f7cb8 /linux-core
parent    bd8ca12b7baff778d5bb7b4ad1d38d16b60a4d5a (diff)
Adapt to architecture-specific hooks for gatt pages.
Diffstat (limited to 'linux-core')
 -rw-r--r--  linux-core/drm_agpsupport.c   2
 -rw-r--r--  linux-core/drm_bo.c           2
 -rw-r--r--  linux-core/drm_compat.c      20
 -rw-r--r--  linux-core/drm_compat.h      23
 -rw-r--r--  linux-core/drm_ttm.c         32
 -rw-r--r--  linux-core/drm_vm.c           3
 6 files changed, 59 insertions(+), 23 deletions(-)
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 22987b07..2dd80162 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -586,7 +586,7 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
mem->page_count = 0;
for (cur_page = pages; cur_page < last_page; ++cur_page) {
- mem->memory[mem->page_count++] = page_to_phys(*cur_page);
+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
}
agp_priv->mem = mem;
return 0;
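
The phys_to_gart() hook comes from <asm/agp.h>: on x86 it is an identity macro, while other architectures can translate a CPU physical address into whatever address the GART actually decodes. For reference, a simplified sketch of what the x86 hooks of roughly this era look like (not part of this commit; other architectures substitute real translation and cache maintenance):

/* Simplified sketch of the x86 <asm/agp.h> hooks, for reference only. */
#define map_page_into_agp(page)       change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page)     change_page_attr(page, 1, PAGE_KERNEL)
#define flush_agp_mappings()          global_flush_tlb()
#define phys_to_gart(x)               (x)   /* identity on x86 */
#define gart_to_phys(x)               (x)
#define alloc_gatt_pages(order)       ((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) free_pages((unsigned long)(table), (order))
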
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index f479c81a..4f1c4173 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1562,7 +1562,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
if (arg.req.tt_p_size) {
ret = drm_mm_init(&bm->tt_manager,
arg.req.tt_p_offset,
- arg.req.tt_p_size);
+ 3000 /*arg.req.tt_p_size*/);
bm->has_tt = 1;
bm->use_tt = 1;
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index e56f6608..8dbc636a 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -160,6 +160,26 @@ void drm_clear_vma(struct vm_area_struct *vma,
}
#endif
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+int drm_map_page_into_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+ /* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
+ return i;
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL);
+ /* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
+ return i;
+}
+#endif
+
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
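
The compat header below notes that the stock map_page_into_agp()/unmap_page_from_agp() are too slow on pre-2.6.15 kernels, so these wrappers deliberately skip the per-call flush and, as the comments say, leave global_flush_tlb() to the caller. A hypothetical usage sketch (set_pages_uncached() is an illustration, not part of the commit):

/* Hypothetical helper showing the intended batching: change the
 * attributes of every page first, then pay for a single global
 * TLB flush at the end. */
static int set_pages_uncached(struct page **pages, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; ++i) {
		ret = drm_map_page_into_agp(pages[i]);
		if (ret)
			break;
	}
	global_flush_tlb();	/* one flush for the whole batch */
	return ret;
}
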
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 779a7000..cf84a70b 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -31,6 +31,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <asm/agp.h>
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_
@@ -245,4 +246,26 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
+/*
+ * These are similar to the current kernel gatt pages allocator, only that we
+ * want a struct page pointer instead of a virtual address. This allows for pages
+ * that are not in the kernel linear map.
+ */
+
+#define drm_alloc_gatt_pages(order) virt_to_page(alloc_gatt_pages(order))
+#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These are too slow in earlier kernels.
+ */
+
+extern int drm_unmap_page_from_agp(struct page *page);
+extern int drm_map_page_into_agp(struct page *page);
+
+#define map_page_into_agp drm_map_page_into_agp
+#define unmap_page_from_agp drm_unmap_page_from_agp
+#endif
+
#endif
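
As the comment above explains, the wrappers hand back struct page pointers rather than the kernel virtual addresses that alloc_gatt_pages() returns, so the TTM code can also handle pages outside the kernel linear map. A hypothetical usage sketch (gatt_page_demo() is an illustration only):

/* Hypothetical sketch: allocate one gatt-capable page, derive the
 * address the GART should see, and free the page again. */
static int gatt_page_demo(void)
{
	struct page *page = drm_alloc_gatt_pages(0);	/* order 0: one page */
	unsigned long gart_addr;

	if (!page)
		return -ENOMEM;
	gart_addr = phys_to_gart(page_to_phys(page));
	(void)gart_addr;	/* would be programmed into the aperture */
	drm_free_gatt_pages(page, 0);
	return 0;
}
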
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index f72e7d30..6790c886 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -170,7 +170,6 @@ static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
if (ret)
break;
}
- global_flush_tlb();
return ret;
}
@@ -182,9 +181,7 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long num_pages)
{
struct list_head *list;
- struct page **first_page = ttm->pages + page_offset;
- struct page **last_page = ttm->pages + (page_offset + num_pages);
- struct page **cur_page;
+
#if !defined(flush_tlb_mm) && defined(MODULE)
int flush_tlb = 0;
#endif
@@ -207,13 +204,6 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
global_flush_tlb();
#endif
- for (cur_page = first_page; cur_page != last_page; ++cur_page) {
- if (page_mapped(*cur_page)) {
- DRM_ERROR("Mapped page detected. Map count is %d\n",
- page_mapcount(*cur_page));
- return -1;
- }
- }
return 0;
}
@@ -258,7 +248,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (ttm->page_flags &&
(ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
*cur_page && !PageHighMem(*cur_page)) {
- change_page_attr(*cur_page, 1, PAGE_KERNEL);
+ unmap_page_from_agp(*cur_page);
do_tlbflush = 1;
}
if (*cur_page) {
@@ -278,19 +268,20 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
* End debugging.
*/
- __free_page(*cur_page);
+ drm_free_gatt_pages(*cur_page, 0);
--bm->cur_pages;
}
}
if (do_tlbflush)
- global_flush_tlb();
+ flush_agp_mappings();
ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
DRM_MEM_TTM);
ttm->pages = NULL;
}
if (ttm->page_flags) {
- ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), DRM_MEM_TTM);
+ ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags),
+ DRM_MEM_TTM);
ttm->page_flags = NULL;
}
@@ -455,7 +446,6 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
{
int i, cur;
struct page **cur_page;
- pgprot_t attr = (noncached) ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL;
for (i = 0; i < num_pages; ++i) {
cur = page_offset + i;
@@ -472,12 +462,16 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
DRM_TTM_PAGE_UNCACHED) != noncached) {
DRM_MASK_VAL(ttm->page_flags[cur],
DRM_TTM_PAGE_UNCACHED, noncached);
- change_page_attr(*cur_page, 1, attr);
+ if (noncached) {
+ map_page_into_agp(*cur_page);
+ } else {
+ unmap_page_from_agp(*cur_page);
+ }
}
}
}
if (do_tlbflush)
- global_flush_tlb();
+ flush_agp_mappings();
return 0;
}
@@ -612,7 +606,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
drm_destroy_ttm_region(entry);
return -ENOMEM;
}
- *cur_page = alloc_page(GFP_KERNEL);
+ *cur_page = drm_alloc_gatt_pages(0);
if (!*cur_page) {
DRM_ERROR("Page allocation failed\n");
drm_destroy_ttm_region(entry);
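
Taken together, the drm_ttm.c changes route every caching transition through the per-architecture hooks and replace the final global_flush_tlb() with flush_agp_mappings(). A condensed, hypothetical restatement of the pattern drm_set_caching() now follows (not the literal driver code):

/* Hypothetical condensed sketch: flip each lowmem page through the
 * arch hooks, then do one architecture-appropriate flush at the end. */
static void ttm_toggle_caching(struct page **pages, int count, int noncached)
{
	int i;

	for (i = 0; i < count; ++i) {
		if (!pages[i] || PageHighMem(pages[i]))
			continue;
		if (noncached)
			map_page_into_agp(pages[i]);
		else
			unmap_page_from_agp(pages[i]);
	}
	flush_agp_mappings();
}
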
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index aed0e04f..76d7fb4e 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -274,8 +274,7 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
return NOPAGE_OOM;
}
++bm->cur_pages;
- page = ttm->pages[page_offset] =
- alloc_page(GFP_KERNEL);
+ page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
}
if (!page)
return NOPAGE_OOM;
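
In the nopage handler, a missing TTM page is now backed by the gatt-capable allocator as well, so fault-time allocations satisfy the same placement constraints as pages allocated up front. A hypothetical helper restating that fault path (illustration only):

/* Hypothetical sketch: populate a missing TTM slot with a
 * gatt-capable page; NULL means the caller returns NOPAGE_OOM. */
static struct page *ttm_fault_alloc(drm_ttm_t *ttm, unsigned long page_offset)
{
	struct page *page = ttm->pages[page_offset];

	if (!page) {
		page = drm_alloc_gatt_pages(0);
		if (page)
			ttm->pages[page_offset] = page;
	}
	return page;
}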