/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
 * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
 */
/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith
 *    Kevin E. Martin
 *    Gareth Hughes
 *    Michel Dänzer
 */

#ifndef __R128_DRV_H__
#define __R128_DRV_H__

/* General customization:
 */
#define DRIVER_AUTHOR		"Gareth Hughes, VA Linux Systems Inc."

#define DRIVER_NAME		"r128"
#define DRIVER_DESC		"ATI Rage 128"
#define DRIVER_DATE		"20030725"

/* Interface history:
 *
 * ?? - ??
 * 2.4 - Add support for ycbcr textures (no new ioctls)
 * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
 */
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		5
#define DRIVER_PATCHLEVEL	0

#define GET_RING_HEAD(dev_priv)		R128_READ( R128_PM4_BUFFER_DL_RPTR )

typedef struct drm_r128_freelist {
	unsigned int age;
	struct drm_buf *buf;
	struct drm_r128_freelist *next;
	struct drm_r128_freelist *prev;
} drm_r128_freelist_t;

typedef struct drm_r128_ring_buffer {
	u32 *start;
	u32 *end;
	int size;
	int size_l2qw;

	u32 tail;
	u32 tail_mask;
	int space;

	int high_mark;
} drm_r128_ring_buffer_t;

typedef struct drm_r128_private {
	drm_r128_ring_buffer_t ring;
	drm_r128_sarea_t *sarea_priv;

	int cce_mode;
	int cce_fifo_size;
	int cce_running;

	drm_r128_freelist_t *head;
	drm_r128_freelist_t *tail;

	int usec_timeout;
	int is_pci;
	unsigned long cce_buffers_offset;

	atomic_t idle_count;

	int page_flipping;
	int current_page;
	u32 crtc_offset;
	u32 crtc_offset_cntl;

	atomic_t vbl_received;

	u32 color_fmt;
	unsigned int front_offset;
	unsigned int front_pitch;
	unsigned int back_offset;
	unsigned int back_pitch;

	u32 depth_fmt;
	unsigned int depth_offset;
	unsigned int depth_pitch;
	unsigned int span_offset;

	u32 front_pitch_offset_c;
	u32 back_pitch_offset_c;
	u32 depth_pitch_offset_c;
	u32 span_pitch_offset_c;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio;
	drm_local_map_t *cce_ring;
	drm_local_map_t *ring_rptr;
	drm_local_map_t *agp_textures;
	struct drm_ati_pcigart_info gart_info;
} drm_r128_private_t;

typedef struct drm_r128_buf_priv {
	u32 age;
	int prim;
	int discard;
	int dispatched;
	drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;

extern struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;

				/* r128_cce.c */
extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);

extern void r128_freelist_reset(struct drm_device * dev);

extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);

extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(struct drm_device * dev);

extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(struct drm_device * dev);
extern int r128_driver_irq_postinstall(struct drm_device * dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
extern void r128_driver_preclose(struct drm_device * dev,
				 struct drm_file *file_priv);

extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);

/* Register definitions, register access macros and drmAddMap constants
 * for Rage 128 kernel driver.
 */
#define R128_AUX_SC_CNTL		0x1660
#	define R128_AUX1_SC_EN			(1 << 0)
#	define R128_AUX1_SC_MODE_OR		(0 << 1)
#	define R128_AUX1_SC_MODE_NAND		(1 << 1)
#	define R128_AUX2_SC_EN			(1 << 2)
#	define R128_AUX2_SC_MODE_OR		(0 << 3)
#	define R128_AUX2_SC_MODE_NAND		(1 << 3)
#	define R128_AUX3_SC_EN			(1 << 4)
#	define R128_AUX3_SC_MODE_OR		(0 << 5)
#	define R128_AUX3_SC_MODE_NAND		(1 << 5)
#define R128_AUX1_SC_LEFT		0x1664
#define R128_AUX1_SC_RIGHT		0x1668
#define R128_AUX1_SC_TOP		0x166c
#define R128_AUX1_SC_BOTTOM		0x1670
#define R128_AUX2_SC_LEFT		0x1674
#define R128_AUX2_SC_RIGHT		0x1678
#define R128_AUX2_SC_TOP		0x167c
#define R128_AUX2_SC_BOTTOM		0x1680
#define R128_AUX3_SC_LEFT		0x1684
#define R128_AUX3_SC_RIGHT		0x1688
#define R128_AUX3_SC_TOP		0x168c
#define R128_AUX3_SC_BOTTOM		0x1690

#define R128_BRUSH_DATA0		0x1480
#define R128_BUS_CNTL			0x0030
#	define R128_BUS_MASTER_DIS		(1 << 6)

#define R128_CLOCK_CNTL_INDEX		0x0008
#define R128_CLOCK_CNTL_DATA		0x000c
#	define R128_PLL_WR_EN			(1 << 7)
#define R128_CONSTANT_COLOR_C		0x1d34
#define R128_CRTC_OFFSET		0x0224
#define R128_CRTC_OFFSET_CNTL		0x0228
#	define R128_CRTC_OFFSET_FLIP_CNTL	(1 << 16)

#define R128_DP_GUI_MASTER_CNTL		0x146c
#	define R128_GMC_SRC_PITCH_OFFSET_CNTL	(1 << 0)
#	define R128_GMC_DST_PITCH_OFFSET_CNTL	(1 << 1)
#	define R128_GMC_BRUSH_SOLID_COLOR	(13 << 4)
#	define R128_GMC_BRUSH_NONE		(15 << 4)
#	define R128_GMC_DST_16BPP		(4 << 8)
#	define R128_GMC_DST_24BPP		(5 << 8)
#	define R128_GMC_DST_32BPP		(6 << 8)
#	define R128_GMC_DST_DATATYPE_SHIFT	8
#	define R128_GMC_SRC_DATATYPE_COLOR	(3 << 12)
#	define R128_DP_SRC_SOURCE_MEMORY	(2 << 24)
#	define R128_DP_SRC_SOURCE_HOST_DATA	(3 << 24)
#	define R128_GMC_CLR_CMP_CNTL_DIS	(1 << 28)
#	define R128_GMC_AUX_CLIP_DIS		(1 << 29)
#	define R128_GMC_WR_MSK_DIS		(1 << 30)
#	define R128_ROP3_S			0x00cc0000
#	define R128_ROP3_P			0x00f00000
#define R128_DP_WRITE_MASK		0x16cc
#define R128_DST_PITCH_OFFSET_C		0x1c80
#	define R128_DST_TILE			(1 << 31)

#define R128_GEN_INT_CNTL		0x0040
#	define R128_CRTC_VBLANK_INT_EN		(1 << 0)
#define R128_GEN_INT_STATUS		0x0044
#	define R128_CRTC_VBLANK_INT		(1 << 0)
#	define R128_CRTC_VBLANK_INT_AK		(1 << 0)
#define R128_GEN_RESET_CNTL		0x00f0
#	define R128_SOFT_RESET_GUI		(1 << 0)
#define R128_GUI_SCRATCH_REG0		0x15e0
#define R128_GUI_SCRATCH_REG1		0x15e4
#define R128_GUI_SCRATCH_REG2		0x15e8
#define R128_GUI_SCRATCH_REG3		0x15ec
#define R128_GUI_SCRATCH_REG4		0x15f0
#define R128_GUI_SCRATCH_REG5		0x15f4
#define R128_GUI_STAT			0x1740
#	define R128_GUI_FIFOCNT_MASK		0x0fff
#	define R128_GUI_ACTIVE			(1 << 31)

#define R128_MCLK_CNTL			0x000f
#	define R128_FORCE_GCP			(1 << 16)
#	define R128_FORCE_PIPE3D_CP		(1 << 17)
#	define R128_FORCE_RCP			(1 << 18)

#define R128_PC_GUI_CTLSTAT		0x1748
#define R128_PC_NGUI_CTLSTAT		0x0184
#	define R128_PC_FLUSH_GUI		(3 << 0)
#	define R128_PC_RI_GUI			(1 << 2)
#	define R128_PC_FLUSH_ALL		0x00ff
#	define R128_PC_BUSY			(1 << 31)

#define R128_PCI_GART_PAGE		0x017c
#define R128_PRIM_TEX_CNTL_C		0x1cb0

#define R128_SCALE_3D_CNTL		0x1a00
#define R128_SEC_TEX_CNTL_C		0x1d00
#define R128_SEC_TEXTURE_BORDER_COLOR_C	0x1d3c
#define R128_SETUP_CNTL			0x1bc4
#define R128_STEN_REF_MASK_C		0x1d40

#define R128_TEX_CNTL_C			0x1c9c
#	define R128_TEX_CACHE_FLUSH		(1 << 23)

#define R128_WAIT_UNTIL			0x1720
#	define R128_EVENT_CRTC_OFFSET		(1 << 0)
#define R128_WINDOW_XY_OFFSET		0x1bcc

/* CCE registers
 */
#define R128_PM4_BUFFER_OFFSET		0x0700
#define R128_PM4_BUFFER_CNTL		0x0704
#	define R128_PM4_MASK			(15 << 28)
#	define R128_PM4_NONPM4			(0 << 28)
#	define R128_PM4_192PIO			(1 << 28)
#	define R128_PM4_192BM			(2 << 28)
#	define R128_PM4_128PIO_64INDBM		(3 << 28)
#	define R128_PM4_128BM_64INDBM		(4 << 28)
#	define R128_PM4_64PIO_128INDBM		(5 << 28)
#	define R128_PM4_64BM_128INDBM		(6 << 28)
#	define R128_PM4_64PIO_64VCBM_64INDBM	(7 << 28)
#	define R128_PM4_64BM_64VCBM_64INDBM	(8 << 28)
#	define R128_PM4_64PIO_64VCPIO_64INDPIO	(15 << 28)
#	define R128_PM4_BUFFER_CNTL_NOUPDATE	(1 << 27)

#define R128_PM4_BUFFER_WM_CNTL		0x0708
#	define R128_WMA_SHIFT			0
#	define R128_WMB_SHIFT			8
#	define R128_WMC_SHIFT			16
#	define R128_WB_WM_SHIFT			24

#define R128_PM4_BUFFER_DL_RPTR_ADDR	0x070c
#define R128_PM4_BUFFER_DL_RPTR		0x0710
#define R128_PM4_BUFFER_DL_WPTR		0x0714
#	define R128_PM4_BUFFER_DL_DONE		(1 << 31)

#define R128_PM4_VC_FPU_SETUP		0x071c

#define R128_PM4_IW_INDOFF		0x0738
#define R128_PM4_IW_INDSIZE		0x073c

#define R128_PM4_STAT			0x07b8
#	define R128_PM4_FIFOCNT_MASK		0x0fff
#	define R128_PM4_BUSY			(1 << 16)
#	define R128_PM4_GUI_ACTIVE		(1 << 31)

#define R128_PM4_MICROCODE_ADDR		0x07d4
#define R128_PM4_MICROCODE_RADDR	0x07d8
#define R128_PM4_MICROCODE_DATAH	0x07dc
#define R128_PM4_MICROCODE_DATAL	0x07e0

#define R128_PM4_BUFFER_ADDR		0x07f0
#define R128_PM4_MICRO_CNTL		0x07fc
#	define R128_PM4_MICRO_FREERUN		(1 << 30)

#define R128_PM4_FIFO_DATA_EVEN		0x1000
#define R128_PM4_FIFO_DATA_ODD		0x1004

/* CCE command packets
 */
#define R128_CCE_PACKET0		0x00000000
#define R128_CCE_PACKET1		0x40000000
#define R128_CCE_PACKET2		0x80000000
#define R128_CCE_PACKET3		0xC0000000
#	define R128_CNTL_HOSTDATA_BLT		0x00009400
#	define R128_CNTL_PAINT_MULTI		0x00009A00
#	define R128_CNTL_BITBLT_MULTI		0x00009B00
#	define R128_3D_RNDR_GEN_INDX_PRIM	0x00002300

#define R128_CCE_PACKET_MASK		0xC0000000
#define R128_CCE_PACKET_COUNT_MASK	0x3fff0000
#define R128_CCE_PACKET0_REG_MASK	0x000007ff
#define R128_CCE_PACKET1_REG0_MASK	0x000007ff
#define R128_CCE_PACKET1_REG1_MASK	0x003ff800

#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE		0x00000000
#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT	0x00000001
#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE		0x00000002
#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE	0x00000003
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST	0x00000004
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN	0x00000005
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP	0x00000006
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2	0x00000007
#define R128_CCE_VC_CNTL_PRIM_WALK_IND		0x00000010
#define R128_CCE_VC_CNTL_PRIM_WALK_LIST		0x00000020
#define R128_CCE_VC_CNTL_PRIM_WALK_RING		0x00000030
#define R128_CCE_VC_CNTL_NUM_SHIFT		16

#define R128_DATATYPE_VQ		0
#define R128_DATATYPE_CI4		1
#define R128_DATATYPE_CI8		2
#define R128_DATATYPE_ARGB1555		3
#define R128_DATATYPE_RGB565		4
#define R128_DATATYPE_RGB888		5
#define R128_DATATYPE_ARGB8888		6
#define R128_DATATYPE_RGB332		7
#define R128_DATATYPE_Y8		8
#define R128_DATATYPE_RGB8		9
#define R128_DATATYPE_CI16		10
#define R128_DATATYPE_YVYU422		11
#define R128_DATATYPE_VYUY422		12
#define R128_DATATYPE_AYUV444		14
#define R128_DATATYPE_ARGB4444		15

/* Constants */
#define R128_AGP_OFFSET			0x02000000

#define R128_WATERMARK_L		16
#define R128_WATERMARK_M		8
#define R128_WATERMARK_N		8
#define R128_WATERMARK_K		128

#define R128_MAX_USEC_TIMEOUT		100000	/* 100 ms */

#define R128_LAST_FRAME_REG		R128_GUI_SCRATCH_REG0
#define R128_LAST_DISPATCH_REG		R128_GUI_SCRATCH_REG1
#define R128_MAX_VB_AGE			0x7fffffff
#define R128_MAX_VB_VERTS		(0xffff)

#define R128_RING_HIGH_MARK		128

#define R128_PERFORMANCE_BOXES		0

#define R128_PCIGART_TABLE_SIZE		32768

#define R128_READ(reg)		DRM_READ32( dev_priv->mmio, (reg) )
#define R128_WRITE(reg, val)	DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define R128_READ8(reg)		DRM_READ8( dev_priv->mmio, (reg) )
#define R128_WRITE8(reg, val)	DRM_WRITE8( dev_priv->mmio, (reg), (val) )
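
/*
 * Usage sketch (illustrative, not part of the original header; assumes a
 * "dev_priv" in scope, as these macros require): spin until the GUI
 * engine goes idle, then acknowledge a pending vblank interrupt.
 *
 *	while (R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)
 *		DRM_UDELAY(1);
 *	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
 */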

#define R128_WRITE_PLL(addr, val)					\
do {									\
	R128_WRITE8(R128_CLOCK_CNTL_INDEX,				\
		    ((addr) & 0x1f) | R128_PLL_WR_EN);			\
	R128_WRITE(R128_CLOCK_CNTL_DATA, (val));			\
} while (0)

#define CCE_PACKET0( reg, n )		(R128_CCE_PACKET0 |		\
					 ((n) << 16) | ((reg) >> 2))
#define CCE_PACKET1( reg0, reg1 )	(R128_CCE_PACKET1 |		\
					 (((reg1) >> 2) << 11) | ((reg0) >> 2))
#define CCE_PACKET2()			(R128_CCE_PACKET2)
#define CCE_PACKET3( pkt, n )		(R128_CCE_PACKET3 |		\
					 (pkt) | ((n) << 16))

static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
{
	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
	ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
	if (ring->space <= 0)
		ring->space += ring->size;
}

/* ================================================================
 * Misc helper macros
 */

#define RING_SPACE_TEST_WITH_RETURN( dev_priv )			\
do {									\
	drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i;		\
	if ( ring->space < ring->high_mark ) {				\
		for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {	\
			r128_update_ring_snapshot( dev_priv );		\
			if ( ring->space >= ring->high_mark )		\
				goto __ring_space_done;			\
			DRM_UDELAY(1);					\
		}							\
		DRM_ERROR( "ring space check failed!\n" );		\
		return -EBUSY;						\
	}								\
 __ring_space_done:							\
	;								\
} while (0)

#define VB_AGE_TEST_WITH_RETURN( dev_priv )				\
do {									\
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;		\
	if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) {		\
		int __ret = r128_do_cce_idle( dev_priv );		\
		if ( __ret ) return __ret;				\
		sarea_priv->last_dispatch = 0;				\
		r128_freelist_reset( dev );				\
	}								\
} while (0)

#define R128_WAIT_UNTIL_PAGE_FLIPPED() do {				\
	OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) );			\
	OUT_RING( R128_EVENT_CRTC_OFFSET );				\
} while (0)

/* ================================================================
 * Ring control
 */

#define R128_VERBOSE	0

#define RING_LOCALS							\
	int write, _nr; unsigned int tail_mask; volatile u32 *ring;

#define BEGIN_RING( n ) do {						\
	if ( R128_VERBOSE ) {						\
		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\
	}								\
	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {		\
		COMMIT_RING();						\
		r128_wait_ring( dev_priv, (n) * sizeof(u32) );		\
	}								\
	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
	ring = dev_priv->ring.start;					\
	write = dev_priv->ring.tail;					\
	tail_mask = dev_priv->ring.tail_mask;				\
} while (0)
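
/*
 * Illustrative sketch (not part of the original header) of how the ring
 * macros above and below compose in a caller; "dev_priv" and the
 * RING_LOCALS variables are assumed to be in scope:
 *
 *	RING_LOCALS;
 *
 *	BEGIN_RING( 2 );			// reserve two dwords
 *	OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) );
 *	OUT_RING( R128_EVENT_CRTC_OFFSET );
 *	ADVANCE_RING();				// advance the local tail
 *	COMMIT_RING();				// write the tail to the chip
 */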

/* You can set this to zero if you want.  If the card locks up, you'll
 * need to keep this set.  It works around a bug in early revs of the
 * Rage 128 chipset, where the CCE would read 32 dwords past the end of
 * the ring buffer before wrapping around.
 */
#define R128_BROKEN_CCE		1

#define ADVANCE_RING() do {						\
	if ( R128_VERBOSE ) {						\
		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
			  write, dev_priv->ring.tail );			\
	}								\
	if ( R128_BROKEN_CCE && write < 32 ) {				\
		memcpy( dev_priv->ring.end,				\
			dev_priv->ring.start,				\
			write * sizeof(u32) );				\
	}								\
	if (((dev_priv->ring.tail + _nr) & tail_mask) != write) {	\
		DRM_ERROR(						\
			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
			((dev_priv->ring.tail + _nr) & tail_mask),	\
			write, __LINE__);				\
	} else								\
		dev_priv->ring.tail = write;				\
} while (0)

#define COMMIT_RING() do {						\
	if ( R128_VERBOSE ) {						\
		DRM_INFO( "COMMIT_RING() tail=0x%06x\n",		\
			  dev_priv->ring.tail );			\
	}								\
	DRM_MEMORYBARRIER();						\
	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail );	\
	R128_READ( R128_PM4_BUFFER_DL_WPTR );				\
} while (0)

#define OUT_RING( x ) do {						\
	if ( R128_VERBOSE ) {						\
		DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n",		\
			  (unsigned int)(x), write );			\
	}								\
	ring[write++] = cpu_to_le32( x );				\
	write &= tail_mask;						\
} while (0)

#endif				/* __R128_DRV_H__ */
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
			      struct file *filp,
			      drm_local_map_t *map);


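/**
 * Compute the page protection bits for an IO map of the given type.
 *
 * \param map_type map type (_DRM_REGISTERS, _DRM_FRAME_BUFFER, _DRM_TTM, ...).
 * \param vma virtual memory area.
 * \return pgprot_t value derived from \p vma->vm_flags, made uncached or
 * write-combined where the architecture requires it.
 */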
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}


/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map, and if it's AGP memory, find the real physical page
 * to map; get the page, increment the use count, and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

#if 0
		/* page_count() not defined everywhere */
		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
#endif

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}
	/* This vma was the only mapping found for this map */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
		        case _DRM_TTM:
				BUG_ON(1);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	DRM_DEBUG("\n");
	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}


/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

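/**
 * \c open method for all mapping types.
 *
 * \param vma virtual memory area.
 *
 * Takes drm_device::struct_mutex and calls drm_vm_open_locked().
 */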
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}


/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
	return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);
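
/*
 * Drivers without special offset handling typically plug these helpers
 * straight into their struct drm_driver, e.g. (sketch):
 *
 *	.get_map_ofs = drm_core_get_map_ofs,
 *	.get_reg_ofs = drm_core_get_reg_ofs,
 */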

/**
 * Memory map a DRM device.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */

	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU cannot access bus DMA
			 * addresses directly, so for memory of type _DRM_AGP
			 * we sort out the real physical pages and mappings
			 * in nopage().
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start,
					(map->offset + offset) >> PAGE_SHIFT,
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid nopage */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_TTM:
		return drm_bo_mmap_locked(vma, filp, map);
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
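
/*
 * Userspace reaches this entry point through an ordinary mmap(2) on the
 * DRM device node, passing the map offset (as returned by the AddMap
 * ioctl) as the file offset, e.g. (illustrative):
 *
 *	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map_offset);
 */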

/**
 * buffer object vm functions.
 */

/**
 * Pagefault method for buffer objects.
 *
 * \param vma Virtual memory area.
 * \param address File offset.
 * \return Error or refault. The pfn is manually inserted.
 *
 * It's important that pfns are inserted while holding the bo->mutex lock;
 * otherwise we might race with unmap_mapping_range(), which is always
 * called with the bo->mutex lock held.
 *
 * We're modifying the page attribute bits of the vma->vm_page_prot field
 * without holding the mmap_sem in write mode, only in read mode.
 * These bits are not used by the mm subsystem code, and we consider them
 * protected by the bo->mutex lock.
 */

#ifdef DRM_FULL_MM_COMPAT
static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
				     unsigned long address)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page = NULL;
	struct drm_ttm *ttm;
	struct drm_device *dev;
	unsigned long pfn;
	int err;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long ret = NOPFN_REFAULT;

	if (address > vma->vm_end)
		return NOPFN_SIGBUS;

	dev = bo->dev;
	err = drm_bo_read_lock(&dev->bm.bm_lock);
	if (err)
		return NOPFN_REFAULT;

	err = mutex_lock_interruptible(&bo->mutex);
	if (err) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		return NOPFN_REFAULT;
	}

	err = drm_bo_wait(bo, 0, 0, 0);
	if (err) {
		ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
		goto out_unlock;
	}

	/*
	 * If the buffer happens to be in a non-mappable location,
	 * move it to a mappable one.
	 */

	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		uint32_t new_mask = bo->mem.mask |
			DRM_BO_FLAG_MAPPABLE |
			DRM_BO_FLAG_FORCE_MAPPABLE;
		err = drm_bo_move_buffer(bo, new_mask, 0, 0);
		if (err) {
			ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
			goto out_unlock;
		}
	}

	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);

	if (err) {
		ret = NOPFN_SIGBUS;
		goto out_unlock;
	}

	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {
		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {
		ttm = bo->ttm;

		drm_ttm_fixup_caching(ttm);
		page = drm_ttm_get_page(ttm, page_offset);
		if (!page) {
			ret = NOPFN_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
			vm_get_page_prot(vma->vm_flags) :
			drm_io_prot(_DRM_TTM, vma);
	}

	err = vm_insert_pfn(vma, address, pfn);
	if (err) {
		ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
		goto out_unlock;
	}
out_unlock:
	mutex_unlock(&bo->mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return ret;
}
#endif

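/**
 * \c open helper for buffer object mappings, expected to be called with
 * drm_device::struct_mutex held.
 *
 * \param vma virtual memory area.
 *
 * Does the generic drm_vm_open_locked() bookkeeping and takes a
 * reference on the buffer object.
 */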
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;

	drm_vm_open_locked(vma);
	atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
	drm_bo_add_vma(bo, vma);
#endif
}

/**
 * \c vma open method for buffer objects.
 *
 * \param vma virtual memory area.
 */

static void drm_bo_vm_open(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;