/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads,
 * hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex does also protect the buffer list heads, so to manipulate those,
 * we need both the bo->mutex and the dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
 * the list traversal will, in general, need to be restarted.
 *
 */
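
/*
 * An illustrative sketch of that rule (not called anywhere; it only uses
 * fields that appear elsewhere in this file). Because the locking order is
 * bo->mutex before dev->struct_mutex, a traversal that needs bo->mutex must
 * drop struct_mutex first, pin the object with its usage counter, and then
 * restart the traversal once both locks have been reacquired:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	entry = list_entry(list->next, struct drm_buffer_object, lru);
 *	atomic_inc(&entry->usage);	// keep entry alive across the unlock
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&entry->mutex);	// legal: bo->mutex is taken first
 *	mutex_lock(&dev->struct_mutex);	// reacquire, then restart traversal
 *
 * drm_fence_buffer_objects() and drm_bo_mem_force_space() below follow
 * exactly this pattern.
 */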

static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);

static inline uint64_t drm_bo_type_flags(unsigned type)
{
	return (1ULL << (24 + type));
}
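
/*
 * The helper above maps a memory type index onto its placement flag: the
 * DRM_BO_FLAG_MEM_* bits start at bit 24 of the 64-bit flag word, so, for
 * example, DRM_BO_MEM_TT yields DRM_BO_FLAG_MEM_TT.
 */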

/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
	DRM_ASSERT_LOCKED(&bo->mutex);

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}

void drm_bo_add_to_lru(struct drm_buffer_object *bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}

static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return 0;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	if (!bo->map_list.map)
		return 0;

	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}

static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}

/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	int ret = 0;
	uint32_t page_flags = 0;

	DRM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
		page_flags |= DRM_TTM_PAGE_WRITE;

	switch (bo->type) {
	case drm_bo_type_device:
	case drm_bo_type_kernel:
		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
					 page_flags, dev->bm.dummy_read_page);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
					 page_flags | DRM_TTM_PAGE_USER,
					 dev->bm.dummy_read_page);
		if (!bo->ttm) {
			ret = -ENOMEM;
			break;
		}

		ret = drm_ttm_set_user(bo->ttm, current,
				       bo->buffer_start,
				       bo->num_pages);
		if (ret)
			return ret;

		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
				  struct drm_bo_mem_reg *mem,
				  int evict, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_ttm_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
			
			struct drm_bo_mem_reg *old_mem = &bo->mem;
			uint64_t save_flags = old_mem->flags;
			uint64_t save_proposed_flags = old_mem->proposed_flags;
			
			*old_mem = *mem;
			mem->mm_node = NULL;
			old_mem->proposed_flags = save_proposed_flags;
			DRM_FLAG_MASKED(save_flags, mem->flags,
					DRM_BO_MASK_MEMTYPE);
			goto moved;
		}
		
	}

	if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
	    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))		
		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (dev->driver->bo_driver->move) 
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
	else
		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Can not flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
			bm->man[bo->mem.mem_type].gpu_offset;


	return 0;

out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_ttm_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/*
 * Call bo->mutex locked.
 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
{
	struct drm_fence_object *fence = bo->fence;

	if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		return -EBUSY;

	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return -EBUSY;
	}
	return 0;
}

static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}


/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
		int no_wait, int check_unfenced)
{
	int ret;

	DRM_ASSERT_LOCKED(&bo->mutex);
	while(unlikely(drm_bo_busy(bo, check_unfenced))) {
		if (no_wait)
			return -EBUSY;

		if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
			mutex_unlock(&bo->mutex);
			wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
			mutex_lock(&bo->mutex);
			bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
		}

		if (bo->fence) {
			struct drm_fence_object *fence;
			uint32_t fence_type = bo->fence_type;

			drm_fence_reference_unlocked(&fence, bo->fence);
			mutex_unlock(&bo->mutex);

			ret = drm_fence_object_wait(fence, lazy, !interruptible,
						    fence_type);

			drm_fence_usage_deref_unlocked(&fence);
			mutex_lock(&bo->mutex);
			bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
			if (ret)
				return ret;
		}

	}
	return 0;
}
EXPORT_SYMBOL(drm_bo_wait);

static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 0, 0, 0);
				if (ret && allow_errors)
					return ret;

			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence)
			drm_fence_usage_deref_unlocked(&bo->fence);
	}
	return 0;
}

/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence,
						   bo->fence_type))
		drm_fence_usage_deref_unlocked(&bo->fence);

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage))
		goto out;

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

out:
	mutex_unlock(&bo->mutex);
	return;
}

/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_ttm_destroy(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);

	return;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
{
	struct drm_buffer_manager *bm = &dev->bm;

	struct drm_buffer_object *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, struct drm_buffer_object, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, struct drm_buffer_object,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry)
			atomic_dec(&nentry->usage);
	}
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct drm_device *dev = (struct drm_device *) data;
	struct drm_buffer_manager *bm = &dev->bm;
#else
	struct drm_buffer_manager *bm =
	    container_of(work, struct drm_buffer_manager, wq.work);
	struct drm_device *dev = container_of(bm, struct drm_device, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	*bo = NULL;

	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);

	if (atomic_dec_and_test(&tmp_bo->usage))
		drm_bo_destroy_locked(tmp_bo);
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);

static void drm_bo_base_deref_locked(struct drm_file *file_priv,
				     struct drm_user_object *uo)
{
	struct drm_buffer_object *bo =
	    drm_user_object_entry(uo, struct drm_buffer_object, base);

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(&bo);
}

void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	struct drm_device *dev = tmp_bo->dev;

	*bo = NULL;
	if (atomic_dec_and_test(&tmp_bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&tmp_bo->usage) == 0)
			drm_bo_destroy_locked(tmp_bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);

void drm_putback_buffer_objects(struct drm_device *dev)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct list_head *list = &bm->unfenced;
	struct drm_buffer_object *entry, *next;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(entry, next, list, lru) {
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);

		mutex_lock(&entry->mutex);
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		mutex_lock(&dev->struct_mutex);

		list_del_init(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		wake_up_all(&entry->event_queue);

		/*
		 * FIXME: Might want to put back on head of list
		 * instead of tail here.
		 */

		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
	}
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_putback_buffer_objects);

/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(struct drm_device *dev,
			     struct list_head *list,
			     uint32_t fence_flags,
			     struct drm_fence_object *fence,
			     struct drm_fence_object **used_fence)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *entry;
	uint32_t fence_type = 0;
	uint32_t fence_class = ~0;
	int count = 0;
	int ret = 0;
	struct list_head *l;

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	if (fence)
		fence_class = fence->fence_class;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->new_fence_type;
		if (fence_class == ~0)
			fence_class = entry->new_fence_class;
		else if (entry->new_fence_class != fence_class) {
			DRM_ERROR("Unmatching fence classes on unfenced list: "
				  "%d and %d.\n",
				  fence_class,
				  entry->new_fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	if (fence) {
		if ((fence_type & fence->type) != fence_type ||
		    (fence->fence_class != fence_class)) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, fence_class, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = list->next;
	while (l != list) {
		prefetch(l->next);
		entry = list_entry(l, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(&entry->fence);
			entry->fence = drm_fence_reference_locked(fence);
			entry->fence_class = entry->new_fence_class;
			entry->fence_type = entry->new_fence_type;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			wake_up_all(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
		l = list->next;
	}
	DRM_DEBUG("Fenced %d buffers\n", count);
out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}
EXPORT_SYMBOL(drm_fence_buffer_objects);
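
/*
 * Illustrative sketch only (hypothetical driver code, not part of this
 * file): fencing everything on the unfenced list after a command
 * submission. A NULL list means dev->bm.unfenced is used, and a NULL fence
 * asks drm_fence_buffer_objects() to create and emit one. Per the note
 * above, the caller owns the reference handed back in used_fence and must
 * drop it when done:
 *
 *	struct drm_fence_object *fence = NULL;
 *	int ret;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (ret == 0)
 *		drm_fence_usage_deref_unlocked(&fence);
 */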

/*
 * bo->mutex locked
 */

static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg evict_mem;

	/*
	 * Someone might have modified the buffer before we took the
	 * buffer mutex.
	 */

	do {
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

		if (unlikely(bo->mem.flags &
			     (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
			goto out_unlock;
		if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
			goto out_unlock;
		if (unlikely(bo->mem.mem_type != mem_type))
			goto out_unlock;
		ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
		if (ret)
			goto out_unlock;

	} while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);

	mutex_lock(&dev->struct_mutex);
	list_del_init(&bo->lru);
	mutex_unlock(&dev->struct_mutex);

	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

out:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	drm_bo_add_to_lru(bo);
	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int drm_bo_mem_force_space(struct drm_device *dev,
				  struct drm_bo_mem_reg *mem,
				  uint32_t mem_type, int no_wait)
{
	struct drm_mm_node *node;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *entry;
	struct drm_mem_type_manager *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(&entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
				int disallow_fixed,
				uint32_t mem_type,
				uint64_t mask, uint32_t *res_mask)
{
	uint64_t cur_flags = drm_bo_type_flags(mem_type);
	uint64_t flag_diff;

	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
		return 0;
	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = (mask ^ cur_flags);
	if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
		cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;

	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;

	*res_mask = cur_flags;
	return 1;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * drm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int drm_bo_mem_space(struct drm_buffer_object *bo,
		     struct drm_bo_mem_reg *mem, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man,
					       bo->type == drm_bo_type_user,
					       mem_type, mem->proposed_flags,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!man->has_type)
			continue;

		if (!drm_bo_mt_compatible(man,
					  bo->type == drm_bo_type_user,
					  mem_type,
					  mem->proposed_flags,
					  &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(drm_bo_mem_space);
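
/*
 * Illustrative sketch: the search above is driven entirely by the driver's
 * priority arrays. A hypothetical driver that prefers VRAM, then TT, then
 * local system memory could set up its struct drm_bo_driver like this (the
 * array name is made up; the four fields are the ones used above):
 *
 *	static const uint32_t hypothetical_mem_prio[] = {
 *		DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL,
 *	};
 *
 *	.mem_type_prio     = hypothetical_mem_prio,
 *	.num_mem_type_prio = ARRAY_SIZE(hypothetical_mem_prio),
 *	.mem_busy_prio     = hypothetical_mem_prio,
 *	.num_mem_busy_prio = ARRAY_SIZE(hypothetical_mem_prio),
 */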

/*
 * drm_bo_modify_proposed_flags:
 *
 * @bo: the buffer object getting new flags
 *
 * @new_flags: the new set of proposed flag bits
 *
 * @new_mask: the mask of bits changed in new_flags
 *
 * Modify the proposed_flags bits in @bo
 */
static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
					 uint64_t new_flags, uint64_t new_mask)
{
	uint32_t new_access;

	/* Copy unchanging bits from existing proposed_flags */
	DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
	 
	if (bo->type == drm_bo_type_user &&
	    ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
	     (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
		DRM_ERROR("User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (bo->type != drm_bo_type_kernel &&
	    (new_mask & DRM_BO_FLAG_NO_EVICT) &&
	    !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to "
			  "privileged processes.\n");
		return -EPERM;
	}

	if (likely(new_mask & DRM_BO_MASK_MEM) &&
	    (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
	    !DRM_SUSER(DRM_CURPROC)) {
		if (likely(bo->mem.flags & new_flags & new_mask &
			   DRM_BO_MASK_MEM))
			new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
				(bo->mem.flags & DRM_BO_MASK_MEM);
		else {
			DRM_ERROR("Incompatible memory type specification "
				  "for NO_EVICT buffer.\n");
			return -EPERM;
		}
	}

	if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
		DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
		return -EPERM;
	}

	new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				  DRM_BO_FLAG_READ);

	if (new_access == 0) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.proposed_flags = new_flags;
	return 0;
}
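
/*
 * A minimal usage sketch for the function above: only the bits set in
 * new_mask are taken from new_flags; everything else is carried over from
 * bo->mem.proposed_flags by the DRM_FLAG_MASKED() call. A caller that just
 * wants to request write access would therefore do:
 *
 *	ret = drm_bo_modify_proposed_flags(bo, DRM_BO_FLAG_WRITE,
 *					   DRM_BO_FLAG_WRITE);
 */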

/*
 * Call dev->struct_mutex locked.
 */

struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
					      uint32_t handle, int check_owner)
{
	struct drm_user_object *uo;
	struct drm_buffer_object *bo;

	uo = drm_lookup_user_object(file_priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && file_priv != uo->owner) {
		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
	atomic_inc(&bo->usage);
	return bo;
}
EXPORT_SYMBOL(drm_lookup_buffer_object);

/*
 * Call bo->mutex locked.
 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
{
	struct drm_fence_object *fence = bo->fence;

	if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		return -EBUSY;

	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return -EBUSY;
	}
	return 0;
}

int drm_bo_evict_cached(struct drm_buffer_object *bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}
EXPORT_SYMBOL(drm_bo_evict_cached);

/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
{
	int ret = 0;

	if (likely(atomic_read(&bo->mapped) == 0))
		return 0;

	if (unlikely(no_wait))
		return -EBUSY;

	do {
		mutex_unlock(&bo->mutex);
		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->mapped) == 0);
		mutex_lock(&bo->mutex);
		bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;

		if (ret == -ERESTARTSYS)
			ret = -EAGAIN;
	} while((ret == 0) && atomic_read(&bo->mapped) > 0);

	return ret;
}

/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
			 struct drm_bo_info_rep *rep)
{
	if (!rep)
		return;

	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->num_pages * PAGE_SIZE;
	rep->offset = bo->offset;

	/*
	 * drm_bo_type_device buffers have user-visible
	 * handles which can be used to share across
	 * processes. Hand that back to the application
	 */
	if (bo->type == drm_bo_type_device)
		rep->arg_handle = bo->map_list.user_token;
	else
		rep->arg_handle = 0;

	rep->proposed_flags = bo->mem.proposed_flags;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
EXPORT_SYMBOL(drm_bo_fill_rep_arg);

/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 struct drm_bo_info_rep *rep)
{
	struct drm_buffer_object *bo;
	struct drm_device *dev = file_priv->minor->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	do {
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

		ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
		if (unlikely(ret))
			goto out;

		if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
			drm_bo_evict_cached(bo);

	} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));

	atomic_inc(&bo->mapped);
	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_dec_and_test(&bo->mapped))
			wake_up_all(&bo->event_queue);

	} else
		drm_bo_fill_rep_arg(bo, rep);

 out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(&bo);

	return ret;
}

static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_buffer_object *bo;
	struct drm_ref_object *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(file_priv, ro);
	drm_bo_usage_deref_locked(&bo);
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Call struct-sem locked.
 */

static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
					 struct drm_user_object *uo,
					 enum drm_ref_type action)
{
	struct drm_buffer_object *bo =
	    drm_user_object_entry(uo, struct drm_buffer_object, base);

	/*
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_dec_and_test(&bo->mapped))
		wake_up_all(&bo->event_queue);
}

/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
 */

int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int ret = 0;
	struct drm_bo_mem_reg mem;

	BUG_ON(bo->fence != NULL);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.proposed_flags = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del_init(&bo->lru);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);
	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

out_unlock:
	mutex_lock(&dev->struct_mutex);
	if (ret || !move_unfenced) {
		if (mem.mm_node) {
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			wake_up_all(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	} else {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	}
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&bm->evict_mutex);
	return ret;
}

static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
{
	uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);

	if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
	     (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;
	return 1;
}

/**
 * drm_buffer_object_validate:
 *
 * @bo: the buffer object to modify
 *
 * @fence_class: the new fence class covering this buffer
 *
 * @move_unfenced: a boolean indicating whether switching the
 * memory space of this buffer should cause the buffer to
 * be placed on the unfenced list.
 *
 * @no_wait: whether this function should return -EBUSY instead
 * of waiting.
 *
 * Change buffer access parameters. This can involve moving
 * the buffer to the correct memory type, pinning the buffer
 * or changing the class/type of fence covering this buffer
 *
 * Must be called with bo locked.
 */

static int drm_buffer_object_validate(struct drm_buffer_object *bo,
				      uint32_t fence_class,
				      int move_unfenced, int no_wait,
				      int move_buffer)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int ret;

	if (move_buffer) {
		ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
					 move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			if (ret == -ENOMEM)
				DRM_ERROR("Out of aperture space or "
					  "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */

	if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);

		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node != NULL)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		}

		mutex_unlock(&dev->struct_mutex);

	} else if (bo->pinned_node != NULL) {

		mutex_lock(&dev->struct_mutex);

		if (bo->pinned_node != bo->mem.mm_node)
			drm_mm_put_block(bo->pinned_node);

		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);

	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags
	 */

	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);

	/*
	 * Finally, adjust lru to be sure.
	 */

	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	} else {
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			wake_up_all(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * This function is called with bo->mutex locked, but may release it
 * temporarily to wait for events.
 */

static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
				       uint64_t flags,
				       uint64_t mask,
				       uint32_t hint,
				       uint32_t fence_class,
				       int no_wait,
				       int *move_buffer)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	uint32_t ftype;

	int ret;

	DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
		  (unsigned long long) bo->mem.proposed_flags,
		  (unsigned long long) bo->mem.flags);

	ret = drm_bo_modify_proposed_flags (bo, flags, mask);
	if (ret)
		return ret;

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	ret = driver->fence_type(bo, &fence_class, &ftype);

	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions.\n");
		return ret;
	}

	/*
	 * We're switching command submission mechanism,
	 * or cannot simply rely on the hardware serializing for us.
	 * Insert a driver-dependant barrier or wait for buffer idle.
	 */

	if ((fence_class != bo->fence_class) ||
	    ((ftype ^ bo->fence_type) & bo->fence_type)) {

		ret = -EINVAL;
		if (driver->command_stream_barrier) {
			ret = driver->command_stream_barrier(bo,
							     fence_class,
							     ftype,
							     no_wait);
		}
		if (ret && ret != -EAGAIN) 
			ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
		
		if (ret)
			return ret;
	}

	bo->new_fence_class = fence_class;
	bo->new_fence_type = ftype;

	/*
	 * Check whether we need to move buffer.
	 */

	*move_buffer = 0;
	if (!drm_bo_mem_compat(&bo->mem)) {
		*move_buffer = 1;
		ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
	}

	return ret;
}

/**
 * drm_bo_do_validate:
 *
 * @bo:	the buffer object
 *
 * @flags: access rights, mapping parameters and cacheability. See
 * the DRM_BO_FLAG_* values in drm.h
 *
 * @mask: Which flag values to change; this allows callers to modify
 * things without knowing the current state of other flags.
 *
 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
 * values in drm.h.
 *
 * @fence_class: a driver-specific way of doing fences. Presumably,
 * this would be used if the driver had more than one submission and
 * fencing mechanism. At this point, there isn't any use of this
 * from the user mode code.
 *
 * @rep: To be stuffed with the reply from validation
 * 
 * 'validate' a buffer object. This changes where the buffer is
 * located, along with changing access modes.
 */

int drm_bo_do_validate(struct drm_buffer_object *bo,
		       uint64_t flags, uint64_t mask, uint32_t hint,
		       uint32_t fence_class,
		       struct drm_bo_info_rep *rep)
{
	int ret;
	int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
	int move_buffer;

	mutex_lock(&bo->mutex);

	do {
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

		ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
						  fence_class, no_wait,
						  &move_buffer);
		if (ret)
			goto out;

	} while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));

	ret = drm_buffer_object_validate(bo,
					 fence_class,
					 !(hint & DRM_BO_HINT_DONT_FENCE),
					 no_wait,
					 move_buffer);

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
out:
	if (rep)
		drm_bo_fill_rep_arg(bo, rep);

	mutex_unlock(&bo->mutex);

	return ret;
}
EXPORT_SYMBOL(drm_bo_do_validate);
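
/*
 * Illustrative sketch (hypothetical in-kernel caller; DRM_BO_FLAG_MEM_VRAM
 * is assumed to be one of the placement flags from drm.h): validate a
 * kernel buffer into VRAM and pin it there without emitting a fence. Only
 * the bits present in the mask argument are changed; see
 * drm_bo_modify_proposed_flags():
 *
 *	ret = drm_bo_do_validate(bo,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_MASK_MEM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_HINT_DONT_FENCE, 0, NULL);
 */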

/**
 * drm_bo_handle_validate
 *
 * @file_priv: the drm file private, used to get a handle to the user context
 *
 * @handle: the buffer object handle
 *
 * @flags: access rights, mapping parameters and cacheability. See
 * the DRM_BO_FLAG_* values in drm.h
 *
 * @mask: Which flag values to change; this allows callers to modify
 * things without knowing the current state of other flags.
 *
 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
 * values in drm.h.
 *
 * @fence_class: a driver-specific way of doing fences. Presumably,
 * this would be used if the driver had more than one submission and
 * fencing mechanism. At this point, there isn't any use of this
 * from the user mode code.
 *
 * @rep: To be stuffed with the reply from validation
 *
 * @bp_rep: To be stuffed with the buffer object pointer
 *
 * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
 * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
 * This is a convenience wrapper only.
 */

int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
			   uint64_t flags, uint64_t mask,
			   uint32_t hint,
			   uint32_t fence_class,
			   struct drm_bo_info_rep *rep,
			   struct drm_buffer_object **bo_rep)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_buffer_object *bo;
	int ret;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	if (bo->base.owner != file_priv)
		mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);

	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);

	if (!ret && bo_rep)
		*bo_rep = bo;
	else
		drm_bo_usage_deref_unlocked(&bo);

	return ret;
}
EXPORT_SYMBOL(drm_bo_handle_validate);


static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
			      struct drm_bo_info_rep *rep)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_buffer_object *bo;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);

	/*
	 * FIXME: Quick busy here?
	 */

	drm_bo_busy(bo, 1);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(&bo);
	return 0;
}

static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
			      uint32_t hint,
			      struct drm_bo_info_rep *rep)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_buffer_object *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);
out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(&bo);
	return ret;
}

int drm_buffer_object_create(struct drm_device *dev,
			     unsigned long size,
			     enum drm_bo_type type,
			     uint64_t flags,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     struct drm_buffer_object **buf_obj)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *bo;
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size %ld.\n", size);
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);

	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, 0);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->pinned_lru);
	INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->p_mm_list);
	INIT_LIST_HEAD(&bo->vma_list);
#endif
	bo->dev = dev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
			 DRM_BO_FLAG_MAPPABLE);
	bo->mem.proposed_flags = 0;
	atomic_inc(&bm->count);
	/*
	 * Use drm_bo_modify_proposed_flags to error-check the proposed flags
	 */
	ret = drm_bo_modify_proposed_flags (bo, flags, flags);
	if (ret)
		goto out_err;

	/*
	 * For drm_bo_type_device buffers, allocate
	 * address space from the device so that applications
	 * can mmap the buffer from there
	 */
	if (bo->type == drm_bo_type_device) {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_setup_vm_locked(bo);
		mutex_unlock(&dev->struct_mutex);
		if (ret)
			goto out_err;
	}

	mutex_unlock(&bo->mutex);
	ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
				 0, NULL);
	if (ret)
		goto out_err_unlocked;

	*buf_obj = bo;
	return 0;

out_err:
	mutex_unlock(&bo->mutex);
out_err_unlocked:
	drm_bo_usage_deref_unlocked(&bo);
	return ret;
}
EXPORT_SYMBOL(drm_buffer_object_create);
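
/*
 * Illustrative sketch (hypothetical driver code; DRM_BO_FLAG_MEM_TT is
 * assumed from drm.h): create a kernel-owned, readable and writable
 * scratch buffer of 16 pages, with no extra alignment and no user address:
 *
 *	struct drm_buffer_object *bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, 16 * PAGE_SIZE,
 *				       drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_TT,
 *				       0, 0, 0, &bo);
 *	if (ret)
 *		return ret;
 */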


static int drm_bo_add_user_object(struct drm_file *file_priv,
				  struct drm_buffer_object *bo, int shareable)
{
	struct drm_device *dev = file_priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(file_priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_create_arg *arg = data;
	struct drm_bo_create_req *req = &arg->d.req;
	struct drm_bo_info_rep *rep = &arg->d.rep;
	struct drm_buffer_object *entry;
	enum drm_bo_type bo_type;
	int ret = 0;

	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
	    (int)(req->size / 1024), req->page_alignment * 4);

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	/*
	 * If the buffer creation request comes in with a starting address,
	 * that points at the desired user pages to map. Otherwise, create
	 * a drm_bo_type_device buffer, which uses pages allocated from the kernel
	 */
	bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;

	/*
	 * User buffers cannot be shared
	 */
	if (bo_type == drm_bo_type_user)
		req->flags &= ~DRM_BO_FLAG_SHAREABLE;

	ret = drm_buffer_object_create(file_priv->minor->dev,
				       req->size, bo_type, req->flags,
				       req->hint, req->page_alignment,
				       req->buffer_start, &entry);
	if (ret)
		goto out;

	ret = drm_bo_add_user_object(file_priv, entry,
				     req->flags & DRM_BO_FLAG_SHAREABLE);
	if (ret) {
		drm_bo_usage_deref_unlocked(&entry);
		goto out;
	}

	mutex_lock(&entry->mutex);
	drm_bo_fill_rep_arg(entry, rep);
	mutex_unlock(&entry->mutex);

out:
	return ret;
}

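/*
 * Ioctl to change the placement and caching flags of an existing buffer
 * object, implemented as a validate with the DONT_FENCE hint. Only the
 * owner of a buffer may set the NO_EVICT and NO_MOVE flags.
 */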
int drm_bo_setstatus_ioctl(struct drm_device *dev,
			   void *data, struct drm_file *file_priv)
{
	struct drm_bo_map_wait_idle_arg *arg = data;
	struct drm_bo_info_req *req = &arg->d.req;
	struct drm_bo_info_rep *rep = &arg->d.rep;
	struct drm_buffer_object *bo;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
	if (ret)
		return ret;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo) {
		(void) drm_bo_read_unlock(&dev->bm.bm_lock);
		return -EINVAL;
	}

	if (bo->base.owner != file_priv)
		req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);

	ret = drm_bo_do_validate(bo, req->flags, req->mask,
				 req->hint | DRM_BO_HINT_DONT_FENCE,
				 bo->fence_class, rep);

	drm_bo_usage_deref_unlocked(&bo);

	(void) drm_bo_read_unlock(&dev->bm.bm_lock);

	return ret;
}

int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_map_wait_idle_arg *arg = data;
	struct drm_bo_info_req *req = &arg->d.req;
	struct drm_bo_info_rep *rep = &arg->d.rep;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
				    req->hint, rep);
	if (ret)
		return ret;

	return 0;
}

int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_handle_arg *arg = data;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_buffer_object_unmap(file_priv, arg->handle);
	return ret;
}


int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_reference_info_arg *arg = data;
	struct drm_bo_handle_arg *req = &arg->d.req;
	struct drm_bo_info_rep *rep = &arg->d.rep;
	struct drm_user_object *uo;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_user_object_ref(file_priv, req->handle,
				  drm_buffer_type, &uo);
	if (ret)
		return ret;

	ret = drm_bo_handle_info(file_priv, req->handle, rep);
	if (ret) {
		(void) drm_user_object_unref(file_priv, req->handle,
					     drm_buffer_type);
		return ret;
	}

	return 0;
}

int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_handle_arg *arg = data;
	int ret = 0;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
	return ret;
}

int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_reference_info_arg *arg = data;
	struct drm_bo_handle_arg *req = &arg->d.req;
	struct drm_bo_info_rep *rep = &arg->d.rep;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_bo_handle_info(file_priv, req->handle, rep);
	if (ret)
		return ret;

	return 0;
}

int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_bo_map_wait_idle_arg *arg = data;
	struct drm_bo_info_req *req = &arg->d.req;
	struct drm_bo_info_rep *rep = &arg->d.rep;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	ret = drm_bo_handle_wait(file_priv, req->handle,
				 req->hint, rep);
	if (ret)
		return ret;

	return 0;
}

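/*
 * Remove a single buffer object from the given memory type at cleanup time:
 * expire its fence, optionally release its pinned region, and evict it if it
 * still resides in @mem_type. With allow_errors set, errors are returned to
 * the caller instead of being reported and ignored.
 */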
static int drm_bo_leave_list(struct drm_buffer_object *bo,
			     uint32_t mem_type,
			     int free_pinned,
			     int allow_errors)
{
	struct drm_device *dev = bo->dev;
	int ret = 0;

	mutex_lock(&bo->mutex);

	ret = drm_bo_expire_fence(bo, allow_errors);
	if (ret)
		goto out;

	if (free_pinned) {
		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node == bo->mem.mm_node)
			bo->pinned_node = NULL;
		if (bo->pinned_node != NULL) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
		bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
	}

	if (bo->mem.mem_type == mem_type)
		ret = drm_bo_evict(bo, mem_type, 0);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			DRM_ERROR("Cleanup eviction failed\n");
		}
	}

out:
	mutex_unlock(&bo->mutex);
	return ret;
}


static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
					 int pinned_list)
{
	if (pinned_list)
		return list_entry(list, struct drm_buffer_object, pinned_lru);
	else
		return list_entry(list, struct drm_buffer_object, lru);
}

/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(struct drm_device *dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int free_pinned,
				   int allow_errors,
				   int pinned_list)
{
	struct list_head *list, *next, *prev;
	struct drm_buffer_object *entry, *nentry;
	int ret;
	int do_restart;

	/*
	 * The list traversal is a bit odd here, because an item may
	 * disappear from the list when we release the struct_mutex or
	 * when we decrease the usage count. Also we're not guaranteed
	 * to drain pinned lists, so we can't always restart.
	 */

restart:
	nentry = NULL;
	list_for_each_safe(list, next, head) {
		prev = list->prev;

		entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
		atomic_inc(&entry->usage);
		if (nentry) {
			atomic_dec(&nentry->usage);
			nentry = NULL;
		}

		/*
		 * Protect the next item from destruction, so we can check
		 * its list pointers later on.
		 */

		if (next != head) {
			nentry = drm_bo_entry(next, pinned_list);
			atomic_inc(&nentry->usage);
		}
		mutex_unlock(&dev->struct_mutex);

		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
					allow_errors);
		mutex_lock(&dev->struct_mutex);

		drm_bo_usage_deref_locked(&entry);
		if (ret)
			return ret;

		/*
		 * Has the next item disappeared from the list?
		 */

		do_restart = ((next->prev != list) && (next->prev != prev));

		if (nentry != NULL && do_restart)
			drm_bo_usage_deref_locked(&nentry);

		if (do_restart)
			goto restart;
	}
	return 0;
}

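/*
 * Take down a memory type manager: evict everything on its lru and pinned
 * lists and release the underlying drm_mm range manager. Returns -EBUSY if
 * buffers remain, and -EPERM when user space (kern_clean == 0) tries to take
 * down a type that was initialized by the kernel.
 */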
int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man = &bm->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type %u\n", mem_type);
		return ret;
	}

	if ((man->kern_init_type) && (kern_clean == 0)) {
		DRM_ERROR("Trying to take down kernel initialized "
			  "memory manager type %u\n", mem_type);
		return -EPERM;
	}

	man->use_type = 0;
	man->has_type = 0;

	ret = 0;
	if (mem_type > 0) {
		BUG_ON(!list_empty(&bm->unfenced));
		drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
		drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);

		if (drm_mm_clean(&man->manager)) {
			drm_mm_takedown(&man->manager);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_bo_clean_mm);

/*
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
 */

static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
{
	int ret;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man = &bm->man[mem_type];

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		DRM_ERROR("Memory type %u has not been initialized.\n",
			  mem_type);
		return 0;
	}

	ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);

	return ret;
}

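/*
 * Initialize a memory type manager. The driver's init_mem_type() hook fills
 * in the type's capabilities; for anything but DRM_BO_MEM_LOCAL a drm_mm
 * range manager covering p_offset..p_offset + p_size is set up as well.
 * kern_init marks the type as kernel-initialized so that user space cannot
 * take it down.
 */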
int drm_bo_init_mm(struct drm_device *dev, unsigned type,
		   unsigned long p_offset, unsigned long p_size,
		   int kern_init)
{
	struct drm_buffer_manager *bm = &dev->bm;
	int ret = -EINVAL;
	struct drm_mem_type_manager *man;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}

	man = &bm->man[type];
	if (man->has_type) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = 1;
	man->use_type = 1;
	man->kern_init_type = kern_init;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);
	INIT_LIST_HEAD(&man->pinned);

	return 0;
}
EXPORT_SYMBOL(drm_bo_init_mm);

/*
 * This function is intended to be called on drm driver unload.
 * If you decide to call it from lastclose, you must protect the call
 * from a potentially racing drm_bo_driver_init in firstopen.
 * (This may happen on X server restart).
 */

int drm_bo_driver_finish(struct drm_device *dev)
{
	struct drm_buffer_manager *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;
	struct drm_mem_type_manager *man;

	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

	while (i--) {
		man = &bm->man[i];
		if (man->has_type) {
			man->use_type = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			man->has_type = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	if (!cancel_delayed_work(&bm->wq))
		flush_scheduled_work();

	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy))
		DRM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bm->man[0].lru))
		DRM_DEBUG("Swap list was clean\n");

	if (list_empty(&bm->man[0].pinned))
		DRM_DEBUG("NO_MOVE list was clean\n");

	if (list_empty(&bm->unfenced))
		DRM_DEBUG("Unfenced list was clean\n");

	if (bm->dummy_read_page) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
		ClearPageReserved(bm->dummy_read_page);
#endif
		__free_page(bm->dummy_read_page);
	}

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_bo_driver_finish);

/*
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing drm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */

int drm_bo_driver_init(struct drm_device *dev)
{
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	struct drm_buffer_manager *bm = &dev->bm;
	int ret = -EINVAL;

	bm->dummy_read_page = NULL;
	drm_bo_init_lock(&bm->bm_lock);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (!bm->dummy_read_page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	SetPageReserved(bm->dummy_read_page);
#endif

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
	if (ret) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
		ClearPageReserved(bm->dummy_read_page);
#endif
		__free_page(bm->dummy_read_page);
		bm->dummy_read_page = NULL;
		goto out_unlock;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	bm->cur_pages = 0;
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_bo_driver_init);

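/*
 * Ioctl for user space to initialize a driver-specific memory type, after
 * verifying that libdrm's buffer object interface version matches the
 * kernel's. Memory type 0 (system memory) is always initialized by the
 * kernel and cannot be set up from here.
 */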
int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_mm_init_arg *arg = data;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	int ret;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (arg->magic != DRM_BO_INIT_MAGIC) {
		DRM_ERROR("You are using an old libdrm that is not compatible with\n"
			  "\tthe kernel DRM module. Please upgrade your libdrm.\n");
		(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
		return -EINVAL;
	}
	if (arg->major != DRM_BO_INIT_MAJOR) {
		DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
			  "\tversion don't match. Got %d, expected %d.\n",
			  arg->major, DRM_BO_INIT_MAJOR);
		(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		DRM_ERROR("DRM memory manager was not initialized.\n");
		goto out;
	}
	if (arg->mem_type == 0) {
		DRM_ERROR("System memory buffers already initialized.\n");
		goto out;
	}
	ret = drm_bo_init_mm(dev, arg->mem_type,
			     arg->p_offset, arg->p_size, 0);

out:
	mutex_unlock(&dev->struct_mutex);
	(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);

	if (ret)
		return ret;

	return 0;
}

int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_mm_type_arg *arg = data;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	int ret;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
	if (ret)
		return ret;

	mutex_lock(&dev->struct_mutex);
	ret = -EINVAL;
	if (!bm->initialized) {
		DRM_ERROR("DRM memory manager was not initialized\n");
		goto out;
	}
	if (arg->mem_type == 0) {
		DRM_ERROR("No takedown for System memory buffers.\n");
		goto out;
	}
	ret = 0;
	if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
		if (ret == -EBUSY)
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg->mem_type);
		ret = 0;
	}
out:
	mutex_unlock(&dev->struct_mutex);
	(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);

	if (ret)
		return ret;

	return 0;
}

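/*
 * Ioctl to evict all buffers from a memory type while leaving NO_MOVE
 * regions intact. With DRM_BO_LOCK_UNLOCK_BM the buffer manager is left
 * write-locked, to be released later by the matching unlock ioctl.
 */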
int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_mm_type_arg *arg = data;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	int ret;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
		DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
		return -EINVAL;
	}

	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
		ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
		if (ret)
			return ret;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_bo_lock_mm(dev, arg->mem_type);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM)
			(void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
		return ret;
	}

	return 0;
}

int drm_mm_unlock_ioctl(struct drm_device *dev,
			void *data,
			struct drm_file *file_priv)
{
	struct drm_mm_type_arg *arg = data;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	int ret;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
		ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
		if (ret)
			return ret;
	}

	return 0;
}

int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_mm_info_arg *arg = data;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	struct drm_mem_type_manager *man;
	int ret = 0;
	int mem_type = arg->mem_type;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		DRM_ERROR("DRM memory manager was not initialized\n");
		ret = -EINVAL;
		goto out;
	}


	man = &bm->man[arg->mem_type];

	arg->p_size = man->size;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * buffer object vm functions.
 */

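/*
 * Return 1 if the memory region is mapped through the (fixed) PCI/bus
 * aperture described by drm_bo_pci_offset(), 0 if it can be accessed
 * through its backing system pages.
 */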
int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man = &bm->man[mem->mem_type];

	if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		if (mem->mem_type == DRM_BO_MEM_LOCAL)
			return 0;

		if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
			return 0;

		if (mem->flags & DRM_BO_FLAG_CACHED)
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(drm_mem_reg_is_pci);

/**
 * Get the PCI offset for the buffer object memory.
 *
 * \param dev The drm device.
 * \param mem The memory region of the buffer object.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
 */

int drm_bo_pci_offset(struct drm_device *dev,
		      struct drm_bo_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man = &bm->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
		return -EINVAL;

	if (drm_mem_reg_is_pci(dev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

/**
 * Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call with bo->mutex held.
 */

void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!dev->dev_mapping)
		return;

	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}

/**
 * drm_bo_takedown_vm_locked:
 *
 * @bo: the buffer object to remove any drm device mapping
 *
 * Remove any associated vm mapping on the drm device node that
 * would have been created for a drm_bo_type_device buffer
 */
static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
{
	struct drm_map_list *list;
	drm_local_map_t *map;
	struct drm_device *dev = bo->dev;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	if (bo->type != drm_bo_type_device)
		return;

	list = &bo->map_list;
	if (list->user_token) {
		drm_ht_remove_item(&dev->map_hash, &list->hash);
		list->user_token = 0;
	}
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (!map)
		return;

	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
	list->map = NULL;
	list->user_token = 0ULL;
	drm_bo_usage_deref_locked(&bo);
}

/**
 * drm_bo_setup_vm_locked:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to drm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */
static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
{
	struct drm_map_list *list = &bo->map_list;
	drm_local_map_t *map;
	struct drm_device *dev = bo->dev;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->offset = 0;
	map->type = _DRM_TTM;
	map->flags = _DRM_REMOVABLE;
	map->size = bo->mem.num_pages * PAGE_SIZE;
	atomic_inc(&bo->usage);
	map->handle = (void *)bo;

	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
						    bo->mem.num_pages, 0, 0);

	if (unlikely(!list->file_offset_node)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  bo->mem.num_pages, 0);

	if (unlikely(!list->file_offset_node)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}

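/*
 * Report the kernel's buffer object interface version to user space.
 */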
int drm_bo_version_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;

	arg->major = DRM_BO_INIT_MAJOR;
	arg->minor = DRM_BO_INIT_MINOR;
	arg->patchlevel = DRM_BO_INIT_PATCH;

	return 0;
}