/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
			      struct file *filp,
			      drm_local_map_t *map);


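/**
 * Compute the page protection for an I/O map.
 *
 * \param map_type map type (_DRM_FRAME_BUFFER, _DRM_REGISTERS, _DRM_TTM, ...).
 * \param vma virtual memory area.
 * \return page protection with architecture-specific caching attributes
 * applied: uncached on most architectures (write-combined on ia64 when the
 * EFI memory map allows it), with x86 AGP maps keeping the default caching.
 */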
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

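/**
 * Compute the page protection for a DMA map.
 *
 * \param map_type map type.
 * \param vma virtual memory area.
 * \return the default page protection, marked uncached on PowerPC systems
 * without coherent caches.
 */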
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

#ifndef DRM_VM_NOPAGE
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		unsigned long offset = (unsigned long)vmf->virtual_address -
								vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;
		int found = 0;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map.
		 * Note that list_for_each_entry() never leaves agpmem NULL,
		 * so track explicitly whether a matching entry was found.
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) {
				found = 1;
				break;
			}
		}

		if (!found)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
#endif

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_TTM:
				BUG_ON(1);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

#ifndef DRM_VM_NOPAGE
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page(dma->pagelist[page_nr] + (offset & ~PAGE_MASK));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
#endif

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
#ifdef DRM_VM_NOPAGE
	.nopage = drm_vm_nopage,
#else
	.fault = drm_do_vm_fault,
#endif
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
#ifdef DRM_VM_NOPAGE
	.nopage = drm_vm_shm_nopage,
#else
	.fault = drm_do_vm_shm_fault,
#endif
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
#ifdef DRM_VM_NOPAGE
	.nopage = drm_vm_dma_nopage,
#else
	.fault = drm_do_vm_dma_fault,
#endif
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
#ifdef DRM_VM_NOPAGE
	.nopage = drm_vm_sg_nopage,
#else
	.fault = drm_do_vm_sg_fault,
#endif
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

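/** Unlocked wrapper that takes drm_device::struct_mutex and calls drm_vm_open_locked(). */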
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}


/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

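/**
 * Default implementation of the get_map_ofs driver callback.
 *
 * \param map DRM map.
 * \return the map offset.
 */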
unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
	return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

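/**
 * Default implementation of the get_reg_ofs driver callback.
 *
 * \return the offset that translates map offsets to bus addresses: the dense
 * memory base relative to the bus memory space on Alpha, zero elsewhere.
 */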
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type, and the pages are
 * remapped. Finally the file pointer is set and drm_vm_open_locked() is
 * called.
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */

	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't access bus DMA
			 * addresses from the CPU, so for memory of type
			 * _DRM_AGP we'll deal with sorting out the real
			 * physical pages and mappings in the fault handler.
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory, but it's
		 * allocated in a different way, so avoid the fault handler.
		 * Set the protection before remapping so the cache
		 * attributes actually apply to the mapping. */
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	case _DRM_TTM:
		return drm_bo_mmap_locked(vma, filp, map);
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

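/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Takes drm_device::struct_mutex and hands off to drm_mmap_locked().
 */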
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
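
/*
 * As a sketch of typical usage (the surrounding hooks shown here reflect
 * common driver code of this era and are illustrative, not mandated by this
 * file), a driver exposes drm_mmap() through its file operations:
 *
 *	static struct file_operations driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.ioctl = drm_ioctl,
 *		.mmap = drm_mmap,
 *		.poll = drm_poll,
 *	};
 */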

/**
 * buffer object vm functions.
 */

/**
 * \c Pagefault method for buffer objects.
 *
 * \param vma Virtual memory area.
 * \param vmf vm fault data.
 * \return Error or VM_FAULT_NOPAGE. The pfn is manually inserted.
 *
 * It's important that pfns are inserted while holding the bo->mutex lock;
 * otherwise we might race with unmap_mapping_range(), which is always
 * called with the bo->mutex lock held.
 *
 * We're modifying the page attribute bits of the vma->vm_page_prot field
 * without holding the mmap_sem in write mode, only in read mode. These
 * bits are not used by the mm subsystem code, and we consider them
 * protected by the bo->mutex lock.
 */

#ifdef DRM_FULL_MM_COMPAT
static int drm_bo_vm_fault(struct vm_area_struct *vma,
				     struct vm_fault *vmf)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page = NULL;
	struct drm_ttm *ttm;
	struct drm_device *dev;
	unsigned long pfn;
	int err;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long ret = VM_FAULT_NOPAGE;

	dev = bo->dev;
	err = mutex_lock_interruptible(&bo->mutex);
	if (err)
		return VM_FAULT_NOPAGE;

	err = drm_bo_wait(bo, 0, 1, 0, 1);
	if (err) {
		ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
		goto out_unlock;
	}

	bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

	/*
	 * If the buffer happens to be in a non-mappable location,
	 * move it to a mappable one.
	 */

	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		uint32_t new_flags = bo->mem.proposed_flags |
			DRM_BO_FLAG_MAPPABLE |
			DRM_BO_FLAG_FORCE_MAPPABLE;
		err = drm_bo_move_buffer(bo, new_flags, 0, 0);
		if (err) {
			ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	}

	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);

	if (err) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {
		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {
		ttm = bo->ttm;

		drm_ttm_fixup_caching(ttm);
		page = drm_ttm_get_page(ttm, page_offset);
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
			vm_get_page_prot(vma->vm_flags) :
			drm_io_prot(_DRM_TTM, vma);
	}

	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err) {
		ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
		goto out_unlock;
	}
out_unlock:
	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
	mutex_unlock(&bo->mutex);
	return ret;
}
#endif

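/**
 * \c locked vma open helper for buffer objects.
 *
 * \param vma virtual memory area.
 *
 * Adds the vma to drm_device::vmalist and takes a reference on the buffer
 * object. Must be called with drm_device::struct_mutex held.
 */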
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;

	drm_vm_open_locked(vma);
	atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
	drm_bo_add_vma(bo, vma);
#endif
}

/**
 * \c vma open method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_open(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	struct drm_device *dev = bo->dev;

	mutex_lock(&dev->struct_mutex);
	drm_bo_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c vma close method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_close(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	struct drm_device *dev = bo->dev;

	drm_vm_close(vma);
	if (bo) {
		mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
		drm_bo_delete_vma(bo, vma);
#endif
		drm_bo_usage_deref_locked((struct drm_buffer_object **)
					  &vma->vm_private_data);
		mutex_unlock(&dev->struct_mutex);
	}
}

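/** Buffer object virtual memory operations */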
static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
#ifdef DRM_NO_FAULT
	.nopfn = drm_bo_vm_nopfn,
#else
	.fault = drm_bo_vm_fault,
#endif
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
	.nopfn = drm_bo_vm_nopfn,
#else
	.nopage = drm_bo_vm_nopage,
#endif
#endif
	.open = drm_bo_vm_open,
	.close = drm_bo_vm_close,
};

/**
 * mmap buffer object memory.
 *
 * \param vma virtual memory area.
 * \param filp file pointer.
 * \param map The buffer object drm map.
 * \return zero on success or a negative number on failure.
 */

static int drm_bo_mmap_locked(struct vm_area_struct *vma,
			      struct file *filp,
			      drm_local_map_t *map)
{
	vma->vm_ops = &drm_bo_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_file = filp;
	vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
	vma->vm_flags |= VM_PFNMAP;
#endif
	drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
	drm_bo_map_bound(vma);
#endif
	return 0;
}