diff -r bb70e852004f libgo/runtime/chan.goc
--- a/libgo/runtime/chan.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/chan.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -111,7 +111,7 @@
 		mysg.releasetime = -1;
 	}
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	if(c->closed)
 		goto closed;
 
@@ -120,7 +120,7 @@
 
 	sg = dequeue(&c->recvq);
 	if(sg != nil) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 
 		gp = sg->g;
 		gp->param = sg;
@@ -133,7 +133,7 @@
 	}
 
 	if(!block) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return false;
 	}
 
@@ -142,10 +142,10 @@
 	mysg.selectdone = nil;
 	g->param = nil;
 	enqueue(&c->sendq, &mysg);
-	runtime_parkunlock(c, "chan send");
+	runtime_parkunlock(&c->lock, "chan send");
 
 	if(g->param == nil) {
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		if(!c->closed)
 			runtime_throw("chansend: spurious wakeup");
 		goto closed;
@@ -162,16 +162,16 @@
 
 	if(c->qcount >= c->dataqsiz) {
 		if(!block) {
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			return false;
 		}
 		mysg.g = g;
 		mysg.elem = nil;
 		mysg.selectdone = nil;
 		enqueue(&c->sendq, &mysg);
-		runtime_parkunlock(c, "chan send");
+		runtime_parkunlock(&c->lock, "chan send");
 
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		goto asynch;
 	}
 
@@ -183,18 +183,18 @@
 	sg = dequeue(&c->recvq);
 	if(sg != nil) {
 		gp = sg->g;
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		if(sg->releasetime)
 			sg->releasetime = runtime_cputicks();
 		runtime_ready(gp);
 	} else
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 	if(mysg.releasetime > 0)
 		runtime_blockevent(mysg.releasetime - t0, 2);
 	return true;
 
 closed:
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	runtime_panicstring("send on closed channel");
 	return false;  // not reached
 }
@@ -232,7 +232,7 @@
 		mysg.releasetime = -1;
 	}
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	if(c->dataqsiz > 0)
 		goto asynch;
 
@@ -241,7 +241,7 @@
 
 	sg = dequeue(&c->sendq);
 	if(sg != nil) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 
 		if(ep != nil)
 			runtime_memmove(ep, sg->elem, c->elemsize);
@@ -257,7 +257,7 @@
 	}
 
 	if(!block) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return false;
 	}
 
@@ -266,10 +266,10 @@
 	mysg.selectdone = nil;
 	g->param = nil;
 	enqueue(&c->recvq, &mysg);
-	runtime_parkunlock(c, "chan receive");
+	runtime_parkunlock(&c->lock, "chan receive");
 
 	if(g->param == nil) {
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		if(!c->closed)
 			runtime_throw("chanrecv: spurious wakeup");
 		goto closed;
@@ -287,7 +287,7 @@
 			goto closed;
 
 		if(!block) {
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			if(received != nil)
 				*received = false;
 			return false;
@@ -296,9 +296,9 @@
 		mysg.elem = nil;
 		mysg.selectdone = nil;
 		enqueue(&c->recvq, &mysg);
-		runtime_parkunlock(c, "chan receive");
+		runtime_parkunlock(&c->lock, "chan receive");
 
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		goto asynch;
 	}
 
@@ -312,12 +312,12 @@
 	sg = dequeue(&c->sendq);
 	if(sg != nil) {
 		gp = sg->g;
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		if(sg->releasetime)
 			sg->releasetime = runtime_cputicks();
 		runtime_ready(gp);
 	} else
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 
 	if(received != nil)
 		*received = true;
@@ -330,7 +330,7 @@
 		runtime_memclr(ep, c->elemsize);
 	if(received != nil)
 		*received = false;
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	if(mysg.releasetime > 0)
 		runtime_blockevent(mysg.releasetime - t0, 2);
 	return true;
@@ -604,7 +604,7 @@
 		c0 = sel->lockorder[i];
 		if(c0 && c0 != c) {
 			c = sel->lockorder[i];
-			runtime_lock(c);
+			runtime_lock(&c->lock);
 		}
 	}
 }
@@ -632,7 +632,7 @@
 		c = sel->lockorder[i];
 		if(i>0 && sel->lockorder[i-1] == c)
 			continue;  // will unlock it on the next iteration
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 	}
 }
 
@@ -1017,9 +1017,9 @@
 	if(runtime_gcwaiting())
 		runtime_gosched();
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	if(c->closed) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		runtime_panicstring("close of closed channel");
 	}
 	c->closed = true;
@@ -1048,7 +1048,7 @@
 		runtime_ready(gp);
 	}
 
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 }
 
 void
diff -r bb70e852004f libgo/runtime/chan.h
--- a/libgo/runtime/chan.h	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/chan.h	Fri Apr 03 17:31:02 2015 -0700
@@ -39,7 +39,7 @@
 	uintgo	recvx;			// receive index
 	WaitQ	recvq;			// list of recv waiters
 	WaitQ	sendq;			// list of send waiters
-	Lock;
+	Lock	lock;
 };
 
 // Buffer follows Hchan immediately in memory.
diff -r bb70e852004f libgo/runtime/heapdump.c
--- a/libgo/runtime/heapdump.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/heapdump.c	Fri Apr 03 17:31:02 2015 -0700
@@ -387,7 +387,7 @@
 				if(sp->kind != KindSpecialFinalizer)
 					continue;
 				spf = (SpecialFinalizer*)sp;
-				p = (byte*)((s->start << PageShift) + spf->offset);
+				p = (byte*)((s->start << PageShift) + spf->special.offset);
 				dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
 			}
 		}
@@ -566,7 +566,7 @@
 			if(sp->kind != KindSpecialProfile)
 				continue;
 			spp = (SpecialProfile*)sp;
-			p = (byte*)((s->start << PageShift) + spp->offset);
+			p = (byte*)((s->start << PageShift) + spp->special.offset);
 			dumpint(TagAllocSample);
 			dumpint((uintptr)p);
 			dumpint((uintptr)spp->b);
diff -r bb70e852004f libgo/runtime/malloc.goc
--- a/libgo/runtime/malloc.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/malloc.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -429,9 +429,9 @@
 	m->mcache->local_nlookup++;
 	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
 		// purge cache stats to prevent overflow
-		runtime_lock(&runtime_mheap);
+		runtime_lock(&runtime_mheap.lock);
 		runtime_purgecachedstats(m->mcache);
-		runtime_unlock(&runtime_mheap);
+		runtime_unlock(&runtime_mheap.lock);
 	}
 
 	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
@@ -728,7 +728,7 @@
 
 static struct
 {
-	Lock;
+	Lock	lock;
 	byte*	pos;
 	byte*	end;
 } persistent;
@@ -757,19 +757,19 @@
 		align = 8;
 	if(size >= PersistentAllocMaxBlock)
 		return runtime_SysAlloc(size, stat);
-	runtime_lock(&persistent);
+	runtime_lock(&persistent.lock);
 	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
 	if(persistent.pos + size > persistent.end) {
 		persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
 		if(persistent.pos == nil) {
-			runtime_unlock(&persistent);
+			runtime_unlock(&persistent.lock);
 			runtime_throw("runtime: cannot allocate memory");
 		}
 		persistent.end = persistent.pos + PersistentAllocChunk;
 	}
 	p = persistent.pos;
 	persistent.pos += size;
-	runtime_unlock(&persistent);
+	runtime_unlock(&persistent.lock);
 	if(stat != &mstats.other_sys) {
 		// reaccount the allocation against provided stat
 		runtime_xadd64(stat, size);
diff -r bb70e852004f libgo/runtime/malloc.h
--- a/libgo/runtime/malloc.h	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/malloc.h	Fri Apr 03 17:31:02 2015 -0700
@@ -390,7 +390,7 @@
 typedef struct SpecialFinalizer SpecialFinalizer;
 struct SpecialFinalizer
 {
-	Special;
+	Special		special;
 	FuncVal*	fn;
 	const FuncType*	ft;
 	const PtrType*	ot;
@@ -401,7 +401,7 @@
 typedef struct SpecialProfile SpecialProfile;
 struct SpecialProfile
 {
-	Special;
+	Special	special;
 	Bucket*	b;
 };
 
@@ -458,7 +458,7 @@
 // Central list of free objects of a given size.
 struct MCentral
 {
-	Lock;
+	Lock  lock;
 	int32 sizeclass;
 	MSpan nonempty;	// list of spans with a free object
 	MSpan empty;	// list of spans with no free objects (or cached in an MCache)
@@ -476,7 +476,7 @@
 // but all the other global data is here too.
 struct MHeap
 {
-	Lock;
+	Lock lock;
 	MSpan free[MaxMHeapList];	// free lists of given length
 	MSpan freelarge;		// free lists length >= MaxMHeapList
 	MSpan busy[MaxMHeapList];	// busy lists of large objects of given length
@@ -505,7 +505,7 @@
 	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
 	// gets its own cache line.
 	struct {
-		MCentral;
+		MCentral mcentral;
 		byte pad[64];
 	} central[NumSizeClasses];
 
diff -r bb70e852004f libgo/runtime/mcache.c
--- a/libgo/runtime/mcache.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mcache.c	Fri Apr 03 17:31:02 2015 -0700
@@ -23,9 +23,9 @@
 	MCache *c;
 	int32 i;
 
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 	runtime_memclr((byte*)c, sizeof(*c));
 	for(i = 0; i < NumSizeClasses; i++)
 		c->alloc[i] = &emptymspan;
@@ -44,10 +44,10 @@
 runtime_freemcache(MCache *c)
 {
 	runtime_MCache_ReleaseAll(c);
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	runtime_purgecachedstats(c);
 	runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 }
 
 // Gets a span that has a free object in it and assigns it
@@ -64,19 +64,19 @@
 	if(s->freelist != nil)
 		runtime_throw("refill on a nonempty span");
 	if(s != &emptymspan)
-		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);
+		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass].mcentral, s);
 
 	// Push any explicitly freed objects to the central lists.
 	// Not required, but it seems like a good time to do it.
 	l = &c->free[sizeclass];
 	if(l->nlist > 0) {
-		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
 		l->list = nil;
 		l->nlist = 0;
 	}
 
 	// Get a new cached span from the central lists.
-	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
+	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass].mcentral);
 	if(s == nil)
 		runtime_throw("out of memory");
 	if(s->freelist == nil) {
@@ -102,7 +102,7 @@
 	// We transfer a span at a time from MCentral to MCache,
 	// so we'll do the same in the other direction.
 	if(l->nlist >= (runtime_class_to_allocnpages[sizeclass]<<PageShift)/size) {
-		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
 		l->list = nil;
 		l->nlist = 0;
 	}
@@ -118,12 +118,12 @@
 	for(i=0; i<NumSizeClasses; i++) {
 		s = c->alloc[i];
 		if(s != &emptymspan) {
-			runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
+			runtime_MCentral_UncacheSpan(&runtime_mheap.central[i].mcentral, s);
 			c->alloc[i] = &emptymspan;
 		}
 		l = &c->free[i];
 		if(l->nlist > 0) {
-			runtime_MCentral_FreeList(&runtime_mheap.central[i], l->list);
+			runtime_MCentral_FreeList(&runtime_mheap.central[i].mcentral, l->list);
 			l->list = nil;
 			l->nlist = 0;
 		}
diff -r bb70e852004f libgo/runtime/mcentral.c
--- a/libgo/runtime/mcentral.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mcentral.c	Fri Apr 03 17:31:02 2015 -0700
@@ -39,14 +39,14 @@
 	int32 cap, n;
 	uint32 sg;
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	sg = runtime_mheap.sweepgen;
 retry:
 	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
 		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			runtime_MSpan_Sweep(s);
-			runtime_lock(c);
+			runtime_lock(&c->lock);
 			// the span could have been moved to heap, retry
 			goto retry;
 		}
@@ -65,9 +65,9 @@
 			runtime_MSpanList_Remove(s);
 			// swept spans are at the end of the list
 			runtime_MSpanList_InsertBack(&c->empty, s);
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			runtime_MSpan_Sweep(s);
-			runtime_lock(c);
+			runtime_lock(&c->lock);
 			// the span could be moved to nonempty or heap, retry
 			goto retry;
 		}
@@ -82,7 +82,7 @@
 
 	// Replenish central list if empty.
 	if(!MCentral_Grow(c)) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return nil;
 	}
 	goto retry;
@@ -98,7 +98,7 @@
 	runtime_MSpanList_Remove(s);
 	runtime_MSpanList_InsertBack(&c->empty, s);
 	s->incache = true;
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	return s;
 }
 
@@ -109,7 +109,7 @@
 	MLink *v;
 	int32 cap, n;
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 
 	s->incache = false;
 
@@ -135,7 +135,7 @@
 		runtime_MSpanList_Remove(s);
 		runtime_MSpanList_Insert(&c->nonempty, s);
 	}
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 }
 
 // Free the list of objects back into the central free list c.
@@ -145,12 +145,12 @@
 {
 	MLink *next;
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	for(; start != nil; start = next) {
 		next = start->next;
 		MCentral_Free(c, start);
 	}
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 }
 
 // Helper: free one object back into the central free list.
@@ -193,7 +193,7 @@
 	// If s is completely freed, return it to the heap.
 	if(s->ref == 0) {
 		MCentral_ReturnToHeap(c, s); // unlocks c
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 	}
 }
 
@@ -206,7 +206,7 @@
 {
 	if(s->incache)
 		runtime_throw("freespan into cached span");
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 
 	// Move to nonempty if necessary.
 	if(s->freelist == nil) {
@@ -227,7 +227,7 @@
 	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);
 
 	if(s->ref != 0) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return false;
 	}
 
@@ -260,12 +260,12 @@
 	byte *p;
 	MSpan *s;
 
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
 	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
 	if(s == nil) {
 		// TODO(rsc): Log out of memory
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		return false;
 	}
 
@@ -282,7 +282,7 @@
 	*tailp = nil;
 	runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	c->nfree += n;
 	runtime_MSpanList_Insert(&c->nonempty, s);
 	return true;
@@ -301,7 +301,7 @@
 	if(s->ref != 0)
 		runtime_throw("ref wrong");
 	c->nfree -= (s->npages << PageShift) / size;
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
 	runtime_MHeap_Free(&runtime_mheap, s, 0);
 }
diff -r bb70e852004f libgo/runtime/mgc0.c
--- a/libgo/runtime/mgc0.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mgc0.c	Fri Apr 03 17:31:02 2015 -0700
@@ -225,7 +225,7 @@
 	Note	alldone;
 	ParFor	*markfor;
 
-	Lock;
+	Lock	lock;
 	byte	*chunk;
 	uintptr	nchunk;
 } work __attribute__((aligned(8)));
@@ -1337,7 +1337,7 @@
 				// retain everything it points to.
 				spf = (SpecialFinalizer*)sp;
 				// A finalizer can be set for an inner byte of an object, find object beginning.
-				p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
+				p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
 				enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
 				enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
 				enqueue1(&wbuf, (Obj){(void*)&spf->ft, PtrSize, 0});
@@ -1378,7 +1378,7 @@
 	b = (Workbuf*)runtime_lfstackpop(&work.empty);
 	if(b == nil) {
 		// Need to allocate.
-		runtime_lock(&work);
+		runtime_lock(&work.lock);
 		if(work.nchunk < sizeof *b) {
 			work.nchunk = 1<<20;
 			work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
@@ -1388,7 +1388,7 @@
 		b = (Workbuf*)work.chunk;
 		work.chunk += sizeof *b;
 		work.nchunk -= sizeof *b;
-		runtime_unlock(&work);
+		runtime_unlock(&work.lock);
 	}
 	b->nobj = 0;
 	return b;
@@ -1802,7 +1802,7 @@
 		c->local_nsmallfree[cl] += nfree;
 		c->local_cachealloc -= nfree * size;
 		runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
-		res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
+		res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl].mcentral, s, nfree, head.next, end);
 		//MCentral_FreeSpan updates sweepgen
 	}
 	return res;
@@ -2147,10 +2147,10 @@
 		return;
 
 	if(gcpercent == GcpercentUnknown) {	// first time through
-		runtime_lock(&runtime_mheap);
+		runtime_lock(&runtime_mheap.lock);
 		if(gcpercent == GcpercentUnknown)
 			gcpercent = readgogc();
-		runtime_unlock(&runtime_mheap);
+		runtime_unlock(&runtime_mheap.lock);
 	}
 	if(gcpercent < 0)
 		return;
@@ -2423,7 +2423,7 @@
 
 	// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
 	p = (uint64*)pauses->array;
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	n = mstats.numgc;
 	if(n > nelem(mstats.pause_ns))
 		n = nelem(mstats.pause_ns);
@@ -2438,7 +2438,7 @@
 	p[n] = mstats.last_gc;
 	p[n+1] = mstats.numgc;
 	p[n+2] = mstats.pause_total_ns;
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 	pauses->__count = n+3;
 }
 
@@ -2446,14 +2446,14 @@
 runtime_setgcpercent(int32 in) {
 	int32 out;
 
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	if(gcpercent == GcpercentUnknown)
 		gcpercent = readgogc();
 	out = gcpercent;
 	if(in < 0)
 		in = -1;
 	gcpercent = in;
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 	return out;
 }
 
diff -r bb70e852004f libgo/runtime/mheap.c
--- a/libgo/runtime/mheap.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mheap.c	Fri Apr 03 17:31:02 2015 -0700
@@ -70,7 +70,7 @@
 	runtime_MSpanList_Init(&h->freelarge);
 	runtime_MSpanList_Init(&h->busylarge);
 	for(i=0; i<nelem(h->central); i++)
-		runtime_MCentral_Init(&h->central[i], i);
+		runtime_MCentral_Init(&h->central[i].mcentral, i);
 }
 
 void
@@ -109,9 +109,9 @@
 			runtime_MSpanList_Remove(s);
 			// swept spans are at the end of the list
 			runtime_MSpanList_InsertBack(list, s);
-			runtime_unlock(h);
+			runtime_unlock(&h->lock);
 			n += runtime_MSpan_Sweep(s);
-			runtime_lock(h);
+			runtime_lock(&h->lock);
 			if(n >= npages)
 				return n;
 			// the span could have been moved elsewhere
@@ -156,7 +156,7 @@
 	}
 
 	// Now sweep everything that is not yet swept.
-	runtime_unlock(h);
+	runtime_unlock(&h->lock);
 	for(;;) {
 		n = runtime_sweepone();
 		if(n == (uintptr)-1)  // all spans are swept
@@ -165,7 +165,7 @@
 		if(reclaimed >= npage)
 			break;
 	}
-	runtime_lock(h);
+	runtime_lock(&h->lock);
 }
 
 // Allocate a new span of npage pages from the heap
@@ -175,7 +175,7 @@
 {
 	MSpan *s;
 
-	runtime_lock(h);
+	runtime_lock(&h->lock);
 	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
 	runtime_m()->mcache->local_cachealloc = 0;
 	s = MHeap_AllocLocked(h, npage, sizeclass);
@@ -191,7 +191,7 @@
 				runtime_MSpanList_InsertBack(&h->busylarge, s);
 		}
 	}
-	runtime_unlock(h);
+	runtime_unlock(&h->lock);
 	if(s != nil) {
 		if(needzero && s->needzero)
 			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
@@ -386,7 +386,7 @@
 void
 runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
-	runtime_lock(h);
+	runtime_lock(&h->lock);
 	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
 	runtime_m()->mcache->local_cachealloc = 0;
 	mstats.heap_inuse -= s->npages<<PageShift;
@@ -395,7 +395,7 @@
 		mstats.heap_objects--;
 	}
 	MHeap_FreeLocked(h, s);
-	runtime_unlock(h);
+	runtime_unlock(&h->lock);
 }
 
 static void
@@ -548,10 +548,10 @@
 		runtime_noteclear(&note);
 		runtime_notetsleepg(&note, tick);
 
-		runtime_lock(h);
+		runtime_lock(&h->lock);
 		unixnow = runtime_unixnanotime();
 		if(unixnow - mstats.last_gc > forcegc) {
-			runtime_unlock(h);
+			runtime_unlock(&h->lock);
 			// The scavenger can not block other goroutines,
 			// otherwise deadlock detector can fire spuriously.
 			// GC blocks other goroutines via the runtime_worldsema.
@@ -561,11 +561,11 @@
 			runtime_notetsleepg(&note, -1);
 			if(runtime_debug.gctrace > 0)
 				runtime_printf("scvg%d: GC forced\n", k);
-			runtime_lock(h);
+			runtime_lock(&h->lock);
 		}
 		now = runtime_nanotime();
 		scavenge(k, now, limit);
-		runtime_unlock(h);
+		runtime_unlock(&h->lock);
 	}
 }
 
@@ -575,9 +575,9 @@
 runtime_debug_freeOSMemory(void)
 {
 	runtime_gc(2);  // force GC and do eager sweep
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	scavenge(-1, ~(uintptr)0, 0);
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 }
 
 // Initialize a new span with the given start and npages.
@@ -752,11 +752,11 @@
 	runtime_lock(&runtime_mheap.speciallock);
 	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
 	runtime_unlock(&runtime_mheap.speciallock);
-	s->kind = KindSpecialFinalizer;
+	s->special.kind = KindSpecialFinalizer;
 	s->fn = f;
 	s->ft = ft;
 	s->ot = ot;
-	if(addspecial(p, s))
+	if(addspecial(p, &s->special))
 		return true;
 
 	// There was an old finalizer
@@ -789,9 +789,9 @@
 	runtime_lock(&runtime_mheap.speciallock);
 	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
 	runtime_unlock(&runtime_mheap.speciallock);
-	s->kind = KindSpecialProfile;
+	s->special.kind = KindSpecialProfile;
 	s->b = b;
-	if(!addspecial(p, s))
+	if(!addspecial(p, &s->special))
 		runtime_throw("setprofilebucket: profile already set");
 }
 
@@ -879,14 +879,14 @@
 	// remove the span from whatever list it is in now
 	if(s->sizeclass > 0) {
 		// must be in h->central[x].empty
-		c = &h->central[s->sizeclass];
-		runtime_lock(c);
+		c = &h->central[s->sizeclass].mcentral;
+		runtime_lock(&c->lock);
 		runtime_MSpanList_Remove(s);
-		runtime_unlock(c);
-		runtime_lock(h);
+		runtime_unlock(&c->lock);
+		runtime_lock(&h->lock);
 	} else {
 		// must be in h->busy/busylarge
-		runtime_lock(h);
+		runtime_lock(&h->lock);
 		runtime_MSpanList_Remove(s);
 	}
 	// heap is locked now
@@ -933,18 +933,18 @@
 
 	// place the span into a new list
 	if(s->sizeclass > 0) {
-		runtime_unlock(h);
-		c = &h->central[s->sizeclass];
-		runtime_lock(c);
+		runtime_unlock(&h->lock);
+		c = &h->central[s->sizeclass].mcentral;
+		runtime_lock(&c->lock);
 		// swept spans are at the end of the list
 		runtime_MSpanList_InsertBack(&c->empty, s);
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 	} else {
 		// Swept spans are at the end of lists.
 		if(s->npages < nelem(h->free))
 			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
 		else
 			runtime_MSpanList_InsertBack(&h->busylarge, s);
-		runtime_unlock(h);
+		runtime_unlock(&h->lock);
 	}
 }
diff -r bb70e852004f libgo/runtime/netpoll.goc
--- a/libgo/runtime/netpoll.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/netpoll.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -53,7 +53,7 @@
 	// pollReset, pollWait, pollWaitCanceled and runtime_netpollready (IO rediness notification)
 	// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
 	// in a lock-free way by all operations.
-	Lock;		// protectes the following fields
+	Lock	lock;	// protectes the following fields
 	uintptr	fd;
 	bool	closing;
 	uintptr	seq;	// protects from stale timers and ready notifications
@@ -68,7 +68,7 @@
 
 static struct
 {
-	Lock;
+	Lock		lock;
 	PollDesc*	first;
 	// PollDesc objects must be type-stable,
 	// because we can get ready notification from epoll/kqueue
@@ -100,7 +100,7 @@
 
 func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
 	pd = allocPollDesc();
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	if(pd->wg != nil && pd->wg != READY)
 		runtime_throw("runtime_pollOpen: blocked write on free descriptor");
 	if(pd->rg != nil && pd->rg != READY)
@@ -112,7 +112,7 @@
 	pd->rd = 0;
 	pd->wg = nil;
 	pd->wd = 0;
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 
 	errno = runtime_netpollopen(fd, pd);
 }
@@ -125,10 +125,10 @@
 	if(pd->rg != nil && pd->rg != READY)
 		runtime_throw("runtime_pollClose: blocked read on closing descriptor");
 	runtime_netpollclose(pd->fd);
-	runtime_lock(&pollcache);
+	runtime_lock(&pollcache.lock);
 	pd->link = pollcache.first;
 	pollcache.first = pd;
-	runtime_unlock(&pollcache);
+	runtime_unlock(&pollcache.lock);
 }
 
 func runtime_pollReset(pd *PollDesc, mode int) (err int) {
@@ -169,9 +169,9 @@
 func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 	G *rg, *wg;
 
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	if(pd->closing) {
-		runtime_unlock(pd);
+		runtime_unlock(&pd->lock);
 		return;
 	}
 	pd->seq++;  // invalidate current timers
@@ -226,7 +226,7 @@
 		rg = netpollunblock(pd, 'r', false);
 	if(pd->wd < 0)
 		wg = netpollunblock(pd, 'w', false);
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 	if(rg)
 		runtime_ready(rg);
 	if(wg)
@@ -236,7 +236,7 @@
 func runtime_pollUnblock(pd *PollDesc) {
 	G *rg, *wg;
 
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	if(pd->closing)
 		runtime_throw("runtime_pollUnblock: already closing");
 	pd->closing = true;
@@ -252,7 +252,7 @@
 		runtime_deltimer(&pd->wt);
 		pd->wt.fv = nil;
 	}
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 	if(rg)
 		runtime_ready(rg);
 	if(wg)
@@ -280,13 +280,13 @@
 void
 runtime_netpolllock(PollDesc *pd)
 {
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 }
 
 void
 runtime_netpollunlock(PollDesc *pd)
 {
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 }
 
 // make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
@@ -399,12 +399,12 @@
 
 	pd = (PollDesc*)arg.data;
 	rg = wg = nil;
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	// Seq arg is seq when the timer was set.
 	// If it's stale, ignore the timer event.
 	if(seq != pd->seq) {
 		// The descriptor was reused or timers were reset.
-		runtime_unlock(pd);
+		runtime_unlock(&pd->lock);
 		return;
 	}
 	if(read) {
@@ -421,7 +421,7 @@
 		runtime_atomicstorep(&pd->wt.fv, nil);  // full memory barrier between store to wd and load of wg in netpollunblock
 		wg = netpollunblock(pd, 'w', false);
 	}
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 	if(rg)
 		runtime_ready(rg);
 	if(wg)
@@ -452,7 +452,7 @@
 	PollDesc *pd;
 	uint32 i, n;
 
-	runtime_lock(&pollcache);
+	runtime_lock(&pollcache.lock);
 	if(pollcache.first == nil) {
 		n = PollBlockSize/sizeof(*pd);
 		if(n == 0)
@@ -467,6 +467,6 @@
 	}
 	pd = pollcache.first;
 	pollcache.first = pd->link;
-	runtime_unlock(&pollcache);
+	runtime_unlock(&pollcache.lock);
 	return pd;
 }
diff -r bb70e852004f libgo/runtime/proc.c
--- a/libgo/runtime/proc.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/proc.c	Fri Apr 03 17:31:02 2015 -0700
@@ -302,7 +302,7 @@
 
 typedef struct Sched Sched;
 struct Sched {
-	Lock;
+	Lock	lock;
 
 	uint64	goidgen;
 	M*	midle;	 // idle m's waiting for work
@@ -709,7 +709,7 @@
 
 	mp->fastrand = 0x49f6428aUL + mp->id + runtime_cputicks();
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	mp->id = runtime_sched.mcount++;
 	checkmcount();
 	runtime_mpreinit(mp);
@@ -720,7 +720,7 @@
 	// runtime_NumCgoCall() iterates over allm w/o schedlock,
 	// so we need to publish it safely.
 	runtime_atomicstorep(&runtime_allm, mp);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Mark gp ready to run.
@@ -747,7 +747,7 @@
 
 	// Figure out how many CPUs to use during GC.
 	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	n = runtime_gomaxprocs;
 	if(n > runtime_ncpu)
 		n = runtime_ncpu > 0 ? runtime_ncpu : 1;
@@ -755,7 +755,7 @@
 		n = MaxGcproc;
 	if(n > runtime_sched.nmidle+1) // one M is currently running
 		n = runtime_sched.nmidle+1;
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return n;
 }
 
@@ -764,14 +764,14 @@
 {
 	int32 n;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	n = runtime_gomaxprocs;
 	if(n > runtime_ncpu)
 		n = runtime_ncpu;
 	if(n > MaxGcproc)
 		n = MaxGcproc;
 	n -= runtime_sched.nmidle+1; // one M is currently running
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return n > 0;
 }
 
@@ -781,7 +781,7 @@
 	M *mp;
 	int32 n, pos;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	pos = 0;
 	for(n = 1; n < nproc; n++) {  // one M is currently running
 		if(runtime_allp[pos]->mcache == m->mcache)
@@ -794,7 +794,7 @@
 		pos++;
 		runtime_notewakeup(&mp->park);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Similar to stoptheworld but best-effort and can be called several times.
@@ -833,7 +833,7 @@
 	P *p;
 	bool wait;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	runtime_sched.stopwait = runtime_gomaxprocs;
 	runtime_atomicstore((uint32*)&runtime_sched.gcwaiting, 1);
 	preemptall();
@@ -853,7 +853,7 @@
 		runtime_sched.stopwait--;
 	}
 	wait = runtime_sched.stopwait > 0;
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	// wait for remaining P's to stop voluntarily
 	if(wait) {
@@ -887,7 +887,7 @@
 	gp = runtime_netpoll(false);  // non-blocking
 	injectglist(gp);
 	add = needaddgcproc();
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(newprocs) {
 		procresize(newprocs);
 		newprocs = 0;
@@ -911,7 +911,7 @@
 		runtime_sched.sysmonwait = false;
 		runtime_notewakeup(&runtime_sched.sysmonnote);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	while(p1) {
 		p = p1;
@@ -1346,9 +1346,9 @@
 	}
 
 retry:
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	mput(m);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	runtime_notesleep(&m->park);
 	runtime_noteclear(&m->park);
 	if(m->helpgc) {
@@ -1375,18 +1375,18 @@
 	M *mp;
 	void (*fn)(void);
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(p == nil) {
 		p = pidleget();
 		if(p == nil) {
-			runtime_unlock(&runtime_sched);
+			runtime_unlock(&runtime_sched.lock);
 			if(spinning)
 				runtime_xadd(&runtime_sched.nmspinning, -1);
 			return;
 		}
 	}
 	mp = mget();
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(mp == nil) {
 		fn = nil;
 		if(spinning)
@@ -1419,28 +1419,28 @@
 		startm(p, true);
 		return;
 	}
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(runtime_sched.gcwaiting) {
 		p->status = Pgcstop;
 		if(--runtime_sched.stopwait == 0)
 			runtime_notewakeup(&runtime_sched.stopnote);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return;
 	}
 	if(runtime_sched.runqsize) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		startm(p, false);
 		return;
 	}
 	// If this is the last running P and nobody is polling network,
 	// need to wakeup another M to poll network.
 	if(runtime_sched.npidle == (uint32)runtime_gomaxprocs-1 && runtime_atomicload64(&runtime_sched.lastpoll) != 0) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		startm(p, false);
 		return;
 	}
 	pidleput(p);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Tries to add one more P to execute G's.
@@ -1512,11 +1512,11 @@
 		runtime_xadd(&runtime_sched.nmspinning, -1);
 	}
 	p = releasep();
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	p->status = Pgcstop;
 	if(--runtime_sched.stopwait == 0)
 		runtime_notewakeup(&runtime_sched.stopnote);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	stopm();
 }
 
@@ -1567,9 +1567,9 @@
 		return gp;
 	// global runq
 	if(runtime_sched.runqsize) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		gp = globrunqget(m->p, 0);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		if(gp)
 			return gp;
 	}
@@ -1603,19 +1603,19 @@
 	}
 stop:
 	// return P and block
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(runtime_sched.gcwaiting) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		goto top;
 	}
 	if(runtime_sched.runqsize) {
 		gp = globrunqget(m->p, 0);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return gp;
 	}
 	p = releasep();
 	pidleput(p);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(m->spinning) {
 		m->spinning = false;
 		runtime_xadd(&runtime_sched.nmspinning, -1);
@@ -1624,9 +1624,9 @@
 	for(i = 0; i < runtime_gomaxprocs; i++) {
 		p = runtime_allp[i];
 		if(p && p->runqhead != p->runqtail) {
-			runtime_lock(&runtime_sched);
+			runtime_lock(&runtime_sched.lock);
 			p = pidleget();
-			runtime_unlock(&runtime_sched);
+			runtime_unlock(&runtime_sched.lock);
 			if(p) {
 				acquirep(p);
 				goto top;
@@ -1643,9 +1643,9 @@
 		gp = runtime_netpoll(true);  // block until new work is available
 		runtime_atomicstore64(&runtime_sched.lastpoll, runtime_nanotime());
 		if(gp) {
-			runtime_lock(&runtime_sched);
+			runtime_lock(&runtime_sched.lock);
 			p = pidleget();
-			runtime_unlock(&runtime_sched);
+			runtime_unlock(&runtime_sched.lock);
 			if(p) {
 				acquirep(p);
 				injectglist(gp->schedlink);
@@ -1688,14 +1688,14 @@
 
 	if(glist == nil)
 		return;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	for(n = 0; glist; n++) {
 		gp = glist;
 		glist = gp->schedlink;
 		gp->status = Grunnable;
 		globrunqput(gp);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	for(; n && runtime_sched.npidle; n--)
 		startm(nil, false);
@@ -1726,9 +1726,9 @@
 	// This is a fancy way to say tick%61==0,
 	// it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
 	if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime_sched.runqsize > 0) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		gp = globrunqget(m->p, 1);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		if(gp)
 			resetspinning();
 	}
@@ -1822,9 +1822,9 @@
 	gp->status = Grunnable;
 	gp->m = nil;
 	m->curg = nil;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	globrunqput(gp);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(m->lockedg) {
 		stoplockedm();
 		execute(gp);  // Never returns.
@@ -1925,24 +1925,24 @@
 	g->status = Gsyscall;
 
 	if(runtime_atomicload(&runtime_sched.sysmonwait)) {  // TODO: fast atomic
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		if(runtime_atomicload(&runtime_sched.sysmonwait)) {
 			runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 			runtime_notewakeup(&runtime_sched.sysmonnote);
 		}
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 	}
 
 	m->mcache = nil;
 	m->p->m = nil;
 	runtime_atomicstore(&m->p->status, Psyscall);
 	if(runtime_sched.gcwaiting) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		if (runtime_sched.stopwait > 0 && runtime_cas(&m->p->status, Psyscall, Pgcstop)) {
 			if(--runtime_sched.stopwait == 0)
 				runtime_notewakeup(&runtime_sched.stopnote);
 		}
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 	}
 
 	m->locks--;
@@ -2053,13 +2053,13 @@
 	// Try to get any other idle P.
 	m->p = nil;
 	if(runtime_sched.pidle) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		p = pidleget();
 		if(p && runtime_atomicload(&runtime_sched.sysmonwait)) {
 			runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 			runtime_notewakeup(&runtime_sched.sysmonnote);
 		}
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		if(p) {
 			acquirep(p);
 			return true;
@@ -2078,7 +2078,7 @@
 	gp->status = Grunnable;
 	gp->m = nil;
 	m->curg = nil;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	p = pidleget();
 	if(p == nil)
 		globrunqput(gp);
@@ -2086,7 +2086,7 @@
 		runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 		runtime_notewakeup(&runtime_sched.sysmonnote);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(p) {
 		acquirep(p);
 		execute(gp);  // Never returns.
@@ -2365,13 +2365,13 @@
 
 	if(n > MaxGomaxprocs)
 		n = MaxGomaxprocs;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	ret = runtime_gomaxprocs;
 	if(n <= 0 || n == ret) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return ret;
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	runtime_semacquire(&runtime_worldsema, false);
 	m->gcing = 1;
@@ -2476,7 +2476,7 @@
 }
 
 static struct {
-	Lock;
+	Lock lock;
 	void (*fn)(uintptr*, int32);
 	int32 hz;
 	uintptr pcbuf[TracebackMaxFrames];
@@ -2508,9 +2508,9 @@
 	if(mp->mcache == nil)
 		traceback = false;
 
-	runtime_lock(&prof);
+	runtime_lock(&prof.lock);
 	if(prof.fn == nil) {
-		runtime_unlock(&prof);
+		runtime_unlock(&prof.lock);
 		mp->mallocing--;
 		return;
 	}
@@ -2538,7 +2538,7 @@
 			prof.pcbuf[1] = (uintptr)System;
 	}
 	prof.fn(prof.pcbuf, n);
-	runtime_unlock(&prof);
+	runtime_unlock(&prof.lock);
 	mp->mallocing--;
 }
 
@@ -2563,13 +2563,13 @@
 	// it would deadlock.
 	runtime_resetcpuprofiler(0);
 
-	runtime_lock(&prof);
+	runtime_lock(&prof.lock);
 	prof.fn = fn;
 	prof.hz = hz;
-	runtime_unlock(&prof);
-	runtime_lock(&runtime_sched);
+	runtime_unlock(&prof.lock);
+	runtime_lock(&runtime_sched.lock);
 	runtime_sched.profilehz = hz;
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	if(hz != 0)
 		runtime_resetcpuprofiler(hz);
@@ -2707,11 +2707,11 @@
 static void
 incidlelocked(int32 v)
 {
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	runtime_sched.nmidlelocked += v;
 	if(v > 0)
 		checkdead();
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Check for deadlock situation.
@@ -2780,16 +2780,16 @@
 		runtime_usleep(delay);
 		if(runtime_debug.schedtrace <= 0 &&
 			(runtime_sched.gcwaiting || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs)) {  // TODO: fast atomic
-			runtime_lock(&runtime_sched);
+			runtime_lock(&runtime_sched.lock);
 			if(runtime_atomicload(&runtime_sched.gcwaiting) || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs) {
 				runtime_atomicstore(&runtime_sched.sysmonwait, 1);
-				runtime_unlock(&runtime_sched);
+				runtime_unlock(&runtime_sched.lock);
 				runtime_notesleep(&runtime_sched.sysmonnote);
 				runtime_noteclear(&runtime_sched.sysmonnote);
 				idle = 0;
 				delay = 20;
 			} else
-				runtime_unlock(&runtime_sched);
+				runtime_unlock(&runtime_sched.lock);
 		}
 		// poll network if not polled for more than 10ms
 		lastpoll = runtime_atomicload64(&runtime_sched.lastpoll);
@@ -2918,7 +2918,7 @@
 	if(starttime == 0)
 		starttime = now;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	runtime_printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idlethreads=%d runqueue=%d",
 		(now-starttime)/1000000, runtime_gomaxprocs, runtime_sched.npidle, runtime_sched.mcount,
 		runtime_sched.nmidle, runtime_sched.runqsize);
@@ -2954,7 +2954,7 @@
 		}
 	}
 	if(!detailed) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return;
 	}
 	for(mp = runtime_allm; mp; mp = mp->alllink) {
@@ -2986,7 +2986,7 @@
 			lockedm ? lockedm->id : -1);
 	}
 	runtime_unlock(&allglock);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Put mp on midle list.
@@ -3142,9 +3142,9 @@
 	for(i=0; i<n; i++)
 		batch[i]->schedlink = batch[i+1];
 	// Now put the batch on global queue.
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	globrunqputbatch(batch[0], batch[n], n+1);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return true;
 }
 
@@ -3296,11 +3296,11 @@
 {
 	int32 out;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	out = runtime_sched.maxmcount;
 	runtime_sched.maxmcount = in;
 	checkmcount();
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return out;
 }
 
diff -r bb70e852004f libgo/runtime/runtime.h
--- a/libgo/runtime/runtime.h	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/runtime.h	Fri Apr 03 17:31:02 2015 -0700
@@ -285,7 +285,7 @@
 
 struct P
 {
-	Lock;
+	Lock	lock;
 
 	int32	id;
 	uint32	status;		// one of Pidle/Prunning/...
@@ -383,7 +383,7 @@
 
 struct	Timers
 {
-	Lock;
+	Lock	lock;
 	G	*timerproc;
 	bool		sleeping;
 	bool		rescheduling;
diff -r bb70e852004f libgo/runtime/sema.goc
--- a/libgo/runtime/sema.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/sema.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -35,7 +35,7 @@
 typedef struct SemaRoot SemaRoot;
 struct SemaRoot
 {
-	Lock;
+	Lock		lock;
 	SemaWaiter*	head;
 	SemaWaiter*	tail;
 	// Number of waiters. Read w/o the lock.
@@ -47,7 +47,7 @@
 
 struct semtable
 {
-	SemaRoot;
+	SemaRoot root;
 	uint8 pad[CacheLineSize-sizeof(SemaRoot)];
 };
 static struct semtable semtable[SEMTABLESZ];
@@ -55,7 +55,7 @@
 static SemaRoot*
 semroot(uint32 volatile *addr)
 {
-	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
+	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ].root;
 }
 
 static void
@@ -124,19 +124,19 @@
 	}
 	for(;;) {
 
-		runtime_lock(root);
+		runtime_lock(&root->lock);
 		// Add ourselves to nwait to disable "easy case" in semrelease.
 		runtime_xadd(&root->nwait, 1);
 		// Check cansemacquire to avoid missed wakeup.
 		if(cansemacquire(addr)) {
 			runtime_xadd(&root->nwait, -1);
-			runtime_unlock(root);
+			runtime_unlock(&root->lock);
 			return;
 		}
 		// Any semrelease after the cansemacquire knows we're waiting
 		// (we set nwait above), so go to sleep.
 		semqueue(root, addr, &s);
-		runtime_parkunlock(root, "semacquire");
+		runtime_parkunlock(&root->lock, "semacquire");
 		if(cansemacquire(addr)) {
 			if(t0)
 				runtime_blockevent(s.releasetime - t0, 3);
@@ -161,11 +161,11 @@
 		return;
 
 	// Harder case: search for a waiter and wake it.
-	runtime_lock(root);
+	runtime_lock(&root->lock);
 	if(runtime_atomicload(&root->nwait) == 0) {
 		// The count is already consumed by another goroutine,
 		// so no need to wake up another goroutine.
-		runtime_unlock(root);
+		runtime_unlock(&root->lock);
 		return;
 	}
 	for(s = root->head; s; s = s->next) {
@@ -175,7 +175,7 @@
 			break;
 		}
 	}
-	runtime_unlock(root);
+	runtime_unlock(&root->lock);
 	if(s) {
 		if(s->releasetime)
 			s->releasetime = runtime_cputicks();
@@ -211,7 +211,7 @@
 typedef struct SyncSema SyncSema;
 struct SyncSema
 {
-	Lock;
+	Lock		lock;
 	SemaWaiter*	head;
 	SemaWaiter*	tail;
};
@@ -238,7 +238,7 @@
 		w.releasetime = -1;
 	}
 
-	runtime_lock(s);
+	runtime_lock(&s->lock);
 	if(s->head && s->head->nrelease > 0) {
 		// have pending release, consume it
 		wake = nil;
@@ -249,7 +249,7 @@
 			if(s->head == nil)
 				s->tail = nil;
 		}
-		runtime_unlock(s);
+		runtime_unlock(&s->lock);
 		if(wake)
 			runtime_ready(wake->g);
 	} else {
@@ -259,7 +259,7 @@
 		else
 			s->tail->next = &w;
 		s->tail = &w;
-		runtime_parkunlock(s, "semacquire");
+		runtime_parkunlock(&s->lock, "semacquire");
 		if(t0)
 			runtime_blockevent(w.releasetime - t0, 2);
 	}
@@ -274,7 +274,7 @@
 	w.next = nil;
 	w.releasetime = 0;
 
-	runtime_lock(s);
+	runtime_lock(&s->lock);
 	while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
 		// have pending acquire, satisfy it
 		wake = s->head;
@@ -293,7 +293,7 @@
 		else
 			s->tail->next = &w;
 		s->tail = &w;
-		runtime_parkunlock(s, "semarelease");
+		runtime_parkunlock(&s->lock, "semarelease");
 	} else
-		runtime_unlock(s);
+		runtime_unlock(&s->lock);
 }
diff -r bb70e852004f libgo/runtime/sigqueue.goc
--- a/libgo/runtime/sigqueue.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/sigqueue.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -32,7 +32,7 @@
 #include "defs.h"
 
 static struct {
-	Note;
+	Note note;
 	uint32 mask[(NSIG+31)/32];
 	uint32 wanted[(NSIG+31)/32];
 	uint32 state;
@@ -70,7 +70,7 @@
 					new = HASSIGNAL;
 				if(runtime_cas(&sig.state, old, new)) {
 					if (old == HASWAITER)
-						runtime_notewakeup(&sig);
+						runtime_notewakeup(&sig.note);
 					break;
 				}
 			}
@@ -107,8 +107,8 @@
 				new = HASWAITER;
 			if(runtime_cas(&sig.state, old, new)) {
 				if (new == HASWAITER) {
-					runtime_notetsleepg(&sig, -1);
-					runtime_noteclear(&sig);
+					runtime_notetsleepg(&sig.note, -1);
+					runtime_noteclear(&sig.note);
 				}
 				break;
 			}
@@ -138,7 +138,7 @@
 		// to use for initialization.  It does not pass
 		// signal information in m.
 		sig.inuse = true;	// enable reception of signals; cannot disable
-		runtime_noteclear(&sig);
+		runtime_noteclear(&sig.note);
 		return;
 	}
 
diff -r bb70e852004f libgo/runtime/time.goc
--- a/libgo/runtime/time.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/time.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -92,17 +92,17 @@
 	t.fv = &readyv;
 	t.arg.__object = g;
 	t.seq = 0;
-	runtime_lock(&timers);
+	runtime_lock(&timers.lock);
 	addtimer(&t);
-	runtime_parkunlock(&timers, reason);
+	runtime_parkunlock(&timers.lock, reason);
 }
 
 void
 runtime_addtimer(Timer *t)
 {
-	runtime_lock(&timers);
+	runtime_lock(&timers.lock);
 	addtimer(t);
-	runtime_unlock(&timers);
+	runtime_unlock(&timers.lock);
 }
 
 // Add a timer to the heap and start or kick the timer proc
@@ -167,14 +167,14 @@
 	i = t->i;
 	gi = i;
 
-	runtime_lock(&timers);
+	runtime_lock(&timers.lock);
 
 	// t may not be registered anymore and may have
 	// a bogus i (typically 0, if generated by Go).
 	// Verify it before proceeding.
 	i = t->i;
 	if(i < 0 || i >= timers.len || timers.t[i] != t) {
-		runtime_unlock(&timers);
+		runtime_unlock(&timers.lock);
 		return false;
 	}
 
@@ -190,7 +190,7 @@
 	}
 	if(debug)
 		dumptimers("deltimer");
-	runtime_unlock(&timers);
+	runtime_unlock(&timers.lock);
 	return true;
 }
 
@@ -209,7 +209,7 @@
 	uintptr seq;
 
 	for(;;) {
-		runtime_lock(&timers);
+		runtime_lock(&timers.lock);
 		timers.sleeping = false;
 		now = runtime_nanotime();
 		for(;;) {
@@ -236,7 +236,7 @@
 			f = (void*)t->fv->fn;
 			arg = t->arg;
 			seq = t->seq;
-			runtime_unlock(&timers);
+			runtime_unlock(&timers.lock);
 			__builtin_call_with_static_chain(f(arg, seq), fv);
 
 			// clear f and arg to avoid leak while sleeping for next timer
@@ -246,20 +246,20 @@
 			arg.__object = nil;
 			USED(&arg);
 
-			runtime_lock(&timers);
+			runtime_lock(&timers.lock);
 		}
 		if(delta < 0) {
 			// No timers left - put goroutine to sleep.
 			timers.rescheduling = true;
 			runtime_g()->isbackground = true;
-			runtime_parkunlock(&timers, "timer goroutine (idle)");
+			runtime_parkunlock(&timers.lock, "timer goroutine (idle)");
 			runtime_g()->isbackground = false;
 			continue;
 		}
 		// At least one timer pending.  Sleep until then.
 		timers.sleeping = true;
 		runtime_noteclear(&timers.waitnote);
-		runtime_unlock(&timers);
+		runtime_unlock(&timers.lock);
 		runtime_notetsleepg(&timers.waitnote, delta);
 	}
 }
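
The change above is mechanical throughout: each anonymous embedded field (Lock;, Note;, Special;, MCentral;, SemaRoot;) becomes a named member, and every call site that passed the outer struct now takes the address of that member instead. The unnamed-field form depends on GCC's -fplan9-extensions, which lets a pointer to a struct convert implicitly to a pointer to the type of an anonymous field; naming the fields removes that dependency so the code builds without the extension. Below is a minimal sketch of the call-site pattern, assuming toy stand-ins for the runtime's Lock and Hchan types; nothing in it is the real runtime API.

/* sketch.c -- illustrates the named-field rewrite; plain ISO C,
 * no -fplan9-extensions required.  Lock and Hchan are toy stand-ins. */
#include <stdio.h>

typedef struct Lock { int held; } Lock;

static void lock_acquire(Lock *l) { l->held = 1; }  /* stand-in for runtime_lock */
static void lock_release(Lock *l) { l->held = 0; }  /* stand-in for runtime_unlock */

typedef struct Hchan {
	unsigned qcount;  /* total data in the queue */
	Lock lock;        /* named field; was the anonymous `Lock;` */
} Hchan;

int main(void) {
	Hchan ch = {0};
	/* Pre-patch code passed the outer struct (runtime_lock(c)) and let
	 * the Plan 9 extension convert Hchan* to Lock*.  With a named
	 * field the address is taken explicitly, as in the patch: */
	lock_acquire(&ch.lock);   /* the patch writes this as &c->lock */
	printf("held=%d\n", ch.lock.held);
	lock_release(&ch.lock);
	return 0;
}

The same address-of-member rewrite accounts for every hunk here, including the global structs (runtime_sched.lock, runtime_mheap.lock, work.lock, prof.lock, timers.lock) and the nested accesses such as spf->special.offset and h->central[i].mcentral.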