
Adding tamias sources for possible later work.

Beoran 11 years ago
parent
commit
90d2b74f6d

+ 309 - 0
src/tamias/arbiter.go

@@ -0,0 +1,309 @@
+package tamias
+
+// Determines how fast penetrations resolve themselves.
+var BIAS_COEF	=	Float(0.1)
+
+// Amount of allowed penetration. Used to reduce vibrating contacts.
+var COLLISION_SLOP	=	Float(0.1)
+
+
+// Data structure for contact points.
+type Contact struct {
+	// Contact point and normal.
+	P, N Vect
+	// Penetration distance.
+	Dist Float
+	
+	// Calculated by cpArbiterPreStep().
+	r1, r2 Vect
+	nMass, tMass, bounce Float
+
+	// Persistent contact information.
+	jnAcc, jtAcc, jBias Float
+	bias Float
+	
+	// Hash value used to (mostly) uniquely identify a contact.
+	Hash HashValue
+} 
+
+
+type ArbiterState int
+
+const (
+  ArbiterStateNormal	   = ArbiterState(0)
+  ArbiterStateFirstColl  = ArbiterState(1)
+  ArbiterStateIgnore	   = ArbiterState(2)
+)
+
+// Data structure for tracking collisions between shapes.
+type Arbiter struct {
+  // Information on the contact points between the objects.
+  numContacts int
+  contacts []Contact
+	
+  // The two shapes involved in the collision.
+  // These variables are NOT in the order defined by the collision handler.
+  private_a, private_b *Shape
+ 
+  // Calculated before calling the pre-solve collision handler
+  // Override them with custom values if you want specialized behavior
+  e Float
+  u Float
+  // Used for surface_v calculations, implementation may change
+  surface_vr Vect
+	
+  // Time stamp of the arbiter. (from cpSpace)
+  stamp int
+	
+  handler * CollisionHandler
+	
+  // Are the shapes swapped in relation to the collision handler?
+  swappedColl bool
+  state ArbiterState
+} 
+
+
+func (con *Contact) Init(p, n Vect, dist Float, hash HashValue) (*Contact) {	
+	con.P 	 	  = p
+	con.N 	 	  = n
+	con.Dist 	  = dist	
+	con.jnAcc 	= Float(0.0)
+	con.jtAcc 	= Float(0.0)
+	con.jBias 	= Float(0.0)
+	con.Hash 	  = hash		
+	return con;
+}
+
+func (arb *Arbiter) TotalImpulse() (Vect) {
+	contacts := arb.contacts
+	sum 	 := VZERO
+	count	 := arb.numContacts
+	
+	for i:=0 ; i < count ; i++ {
+		con := &contacts[i]
+		sum  = sum.Add(con.N.Mult(con.jnAcc))
+	}	
+	return sum;
+}
+
+
+func (arb *Arbiter) TotalImpulseWithFriction() (Vect) {
+	contacts := arb.contacts
+	sum 	 := VZERO
+	count	 := arb.numContacts
+	
+	for i:=0 ; i < count ; i++ {
+		con := &contacts[i]
+		sum  = sum.Add(con.N.Rotate(V(con.jnAcc, con.jtAcc)))
+	}	
+	return sum;
+}
+
+func ContactsEstimateCrushingImpulse(contacts []Contact, numContacts int) (Float) {
+	fsum := Float(0.0);
+	vsum := VZERO;
+	
+	for i:=0; i<numContacts; i++ {
+		con 	:= &contacts[i]
+		j 	:= con.N.Rotate(V(con.jnAcc, con.jtAcc))		
+		fsum    += j.Length()
+		vsum     = vsum.Add(j)
+	}
+	
+	vmag := vsum.Length()
+	return (Float(1.0) - vmag/fsum)  
+}
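+
+// A result near 1.0 means the individual contact impulses largely cancel,
+// i.e. the shapes are squeezing the body from opposing sides. Minimal usage
+// sketch (the 0.9 threshold is an assumption, tune per game):
+//
+//   crush := ContactsEstimateCrushingImpulse(arb.contacts, arb.numContacts)
+//   if crush > Float(0.9) {
+//     // treat the body as being crushed
+//   }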
+
+func (arb *Arbiter) Ignore() {
+  arb.state = ArbiterStateIgnore
+}
+
+func ArbiterAlloc() (* Arbiter) {
+  return &Arbiter{}
+}
+
+func (arb *Arbiter) Init(a *Shape, b *Shape) (* Arbiter) {
+	arb.numContacts = 0
+	arb.contacts 	= nil
+	
+	arb.private_a 	= a
+	arb.private_b 	= b
+	
+	arb.stamp 	= -1;
+	arb.state 	= ArbiterStateFirstColl	
+	return arb
+}
+
+func ArbiterNew(a, b *Shape) (*Arbiter) {
+	return ArbiterAlloc().Init(a, b);
+}
+
+func (arb *Arbiter) Destroy() {
+  //	if(arb.contacts) { arb.contacts = nil }
+}
+
+
+func (arb *Arbiter) Free() { 
+  arb.Destroy()
+  arb = nil // garbage collected
+}  
+
+func (arb *Arbiter) Update(contacts []Contact, numContacts int, 
+		    handler *CollisionHandler, a *Shape, b *Shape) {
+
+	// Arbiters without contact data may exist if a collision function rejected the collision.
+	if arb.contacts != nil {
+		// Iterate over the possible pairs to look for hash value matches.
+		for i:=0; i < arb.numContacts; i++ {
+			old := &arb.contacts[i]
+			
+			for j:=0; j<numContacts; j++ {
+				new_contact := &contacts[j];
+				
+				// This could trigger false positives, but that is unlikely and not serious if it does.
+				if new_contact.Hash == old.Hash {
+					// Copy the persistent contact information.
+					new_contact.jnAcc = old.jnAcc;
+					new_contact.jtAcc = old.jtAcc;
+				}
+			}
+		}
+
+	// cpfree(arb.contacts); 
+	}
+	
+	arb.contacts 	= contacts;
+	arb.numContacts = numContacts;
+	
+	arb.handler 	= handler;
+	arb.swappedColl = (a.collision_type != handler.a);	
+	
+	arb.e = a.e * b.e;
+	arb.u = a.u * b.u;
+	arb.surface_vr = a.surface_v.Sub(b.surface_v)
+	
+	// For collisions between two similar primitive types, the order could have been swapped.
+	arb.private_a = a; arb.private_b = b;
+
+}
+
+func (arb *Arbiter) PreStep(dt_inv Float) { 
+	shapea := arb.private_a;
+	shapeb := arb.private_b;
+
+	a := shapea.Body;
+	b := shapeb.Body;
+	
+	for i:=0; i<arb.numContacts; i++ {
+		con := &arb.contacts[i];
+		
+		// Calculate the offsets.
+		con.r1 = con.P.Sub(a.p);
+		con.r2 = con.P.Sub(b.p);
+		
+		// Calculate the mass normal and mass tangent.
+		con.nMass = Float(1.0) / KScalar(a, b, con.r1, con.r2, con.N)
+		con.tMass = Float(1.0) / KScalar(a, b, con.r1, con.r2, con.N.Perp())	
+		aidmin := Float(0.0).Min(con.Dist + COLLISION_SLOP)
+				
+		// Calculate the target bias velocity.
+		con.bias = -BIAS_COEF*dt_inv*aidmin;
+		con.jBias = 0.0;
+		
+		// Calculate the target bounce velocity.
+		con.bounce = NormalRelativeVelocity(a, b, con.r1, con.r2, con.N)*arb.e;
+		//cpvdot(con.n, cpvsub(v2, v1))*e;
+	}
+}
+
+
+func (arb *Arbiter) ApplyCachedImpulse() { 
+	shapea := arb.private_a
+	shapeb := arb.private_b
+		
+	arb.u 		= shapea.u * shapeb.u
+	arb.surface_vr 	= shapeb.surface_v.Sub(shapea.surface_v)
+
+	a 	       := shapea.Body
+	b 	       := shapeb.Body
+	
+	for  i:=0 ; i<arb.numContacts ; i++ {
+		con := &arb.contacts[i]
+		aid := con.N.Rotate(V(con.jnAcc, con.jtAcc))
+		ApplyImpulses(a, b, con.r1, con.r2, aid)
+	}
+}
+
+func (arb *Arbiter) ApplyImpulse(eCoef Float) { 
+	a := arb.private_a.Body
+	b := arb.private_b.Body
+
+	for i:=0 ; i<arb.numContacts ; i++  { 
+		con := &arb.contacts[i]
+		n   := con.N
+		r1  := con.r1
+		r2  := con.r2
+		
+		// Calculate the relative bias velocities.
+		vb1 := a.v_bias.Add(r1.Perp().Mult(a.w_bias))
+		vb2 := b.v_bias.Add(r2.Perp().Mult(b.w_bias))
+		vbn := vb2.Sub(vb1).Dot(n)
+		
+		// Calculate and clamp the bias impulse.
+		jbn 	  := (con.bias - vbn)*con.nMass
+		jbnOld 	  := con.jBias
+		con.jBias  = jbnOld + jbn.Max(Float(0.0))
+		jbn 	   = con.jBias - jbnOld
+		
+		// Apply the bias impulse.
+		ApplyBiasImpulses(a, b, r1, r2, n.Mult(jbn))
+
+		// Calculate the relative velocity.
+		vr  	  := RelativeVelocity(a, b, r1, r2)
+		vrn       := vr.Dot(n)
+		
+		// Calculate and clamp the normal impulse.
+		jn    := -(con.bounce*eCoef + vrn)*con.nMass
+		jnOld := con.jnAcc
+		con.jnAcc =  jnOld + jn.Max(Float(0.0))		
+		jn     = con.jnAcc - jnOld
+		
+		// Calculate the relative tangent velocity.
+		vrt    := vr.Add(arb.surface_vr).Dot(n.Perp()) 		
+		
+		// Calculate and clamp the friction impulse.
+		jtMax := arb.u*con.jnAcc
+		jt    := -vrt*con.tMass
+		jtOld := con.jtAcc
+		con.jtAcc = (jtOld + jt).Clamp(-jtMax, jtMax)		
+		jt     = con.jtAcc - jtOld
+		
+		// Apply the final impulse.
+		ApplyImpulses(a, b, r1, r2, n.Rotate(V(jn, jt)))
+	}
+}
+
+func (arb *Arbiter) GetShapes() ( a *Shape, b *Shape) {
+  if arb.swappedColl {
+    return arb.private_b, arb.private_a
+  }
+  return arb.private_a, arb.private_b
+}
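+
+// Sketch of typical use inside a post-solve collision callback (the callback
+// wiring itself is an assumption; only the calls below come from this file):
+//
+//   a, b := arb.GetShapes()
+//   if arb.IsFirstContact() {
+//     // e.g. play an impact sound for (a, b), scaled by arb.TotalImpulse().Length()
+//   }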
+
+
+func (arb *Arbiter) IsFirstContact() (bool) {
+	return arb.state == ArbiterStateFirstColl
+}
+
+func (arb * Arbiter) GetNormal(i int) (Vect) {
+  n := arb.contacts[i].N;
+  if arb.swappedColl { 
+    return n.Neg()
+  }
+  return n
+}
+
+func (arb * Arbiter) GetPoint(i int) (Vect) {
+  p := arb.contacts[i].P;
+  return p
+}

+ 98 - 0
src/tamias/array.go

@@ -0,0 +1,98 @@
+package tamias
+
+
+type ArrayElement interface {} 
+
+type Array struct{
+  num, max int
+  arr []ArrayElement
+} 
+
+func ArrayAlloc() (*Array) {
+  return &Array{}
+}
+
+
+func (arr * Array) Init(size int)  (*Array) {
+  arr.num = 0
+  if size < 4 { size    =  4; }
+  arr.max = size
+  arr.arr = make([]ArrayElement, size)
+  return arr
+}
+
+func ArrayNew(size int)  (*Array) {
+  return ArrayAlloc().Init(size)
+}
+
+func (arr * Array) Destroy()  {
+  arr.arr = nil
+}
+
+func (arr * Array) Size() (int)  {
+  return arr.num
+}
+
+func (arr * Array) Index(i int) (ArrayElement) {
+  if i < 0 || i >= arr.max { return nil }
+  return arr.arr[i]
+}
+
+func (arr * Array) Free() {
+  arr.Destroy()
+}
+
+func (arr * Array) Push(object ArrayElement)  {
+  if(arr.num == arr.max){
+    arr.max *= 2
+    newarr  := make([]ArrayElement, arr.max)
+    // copy old to new 
+    copy(newarr, arr.arr) 
+    arr.arr = newarr
+  }
+  arr.arr[arr.num] = object
+  arr.num++  
+}
+
+func (arr * Array) Pop() (object ArrayElement) {
+  arr.num--
+  
+  value := arr.arr[arr.num]
+  arr.arr[arr.num] = nil  
+  
+  return value
+}  
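+
+// Minimal usage sketch of the growable array (values are illustrative):
+//
+//   arr := ArrayNew(0)        // capacity is bumped to the minimum of 4
+//   arr.Push("a")
+//   arr.Push("b")
+//   last := arr.Pop()         // "b"; the vacated slot is cleared for the GC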
+
+func (arr * Array) DeleteIndex(idx int) { 
+  arr.num--  
+  arr.arr[idx]      = arr.arr[arr.num]
+  arr.arr[arr.num]  = nil
+}
+
+func (arr * Array) DeleteObj(obj ArrayElement) { 
+  for i:=0; i<arr.num; i++ {
+    if arr.arr[i] == obj {
+      arr.DeleteIndex(i)
+      return
+    }
+  }
+}
+
+/*
+void
+cpArrayEach(cpArray *arr, cpArrayIter iterFunc, void *data)
+{
+  for(int i=0; i<arr.num; i++)
+    iterFunc(arr.arr[i], data)
+}
+*/
+
+func (arr * Array) Contains(obj ArrayElement) (bool) { 
+  for i:=0; i<arr.num; i++ {
+    if arr.arr[i] == obj {      
+      return true
+    }
+  }
+  return false;
+}
+

+ 87 - 0
src/tamias/bb.go

@@ -0,0 +1,87 @@
+package tamias
+import "fmt" 
+
+// Bounding box type
+type BB struct {
+  L, T, B, R Float
+}
+
+//BBMake returns a new bounding box with the given left, top, right, and bottom
+//coordinates, in that order.
+func BBMake(l, t, r, b Float) (BB) { 
+  return BB{L: l, B: b, R: r, T: t}
+}
+
+func (a BB) Intersects(b BB) bool {
+  return (a.L<=b.R && b.L<=a.R && a.B<=b.T && b.B<=a.T)
+}
+
+func (bb BB) Contains(other BB) bool {
+  return (bb.L < other.L && bb.R > other.R && bb.B < other.B && bb.T > other.T)
+}
+
+func (bb BB) ContainsVect(v Vect) bool {
+  return (bb.L < v.X && bb.R > v.X && bb.B < v.Y && bb.T > v.Y)
+}
+
+func (am BB) Merge(bm BB) (BB) {
+  l := am.L.Min(bm.L)
+  b := am.B.Min(bm.B)
+  r := am.R.Max(bm.R)
+  t := am.T.Max(bm.T)
+  return BBMake(l, t, r, b)
+}
+
+func (bb BB) Expand(v Vect) (BB) {
+  l := bb.L.Min(v.X)
+  b := bb.B.Min(v.Y)
+  r := bb.R.Max(v.X)
+  t := bb.T.Max(v.Y)
+  return BBMake(l, t, r, b)
+}
+
+func (bb BB) Grow(by Float) (BB) {
+  l := bb.L - by
+  b := bb.B - by
+  r := bb.R + by
+  t := bb.T + by
+  return BBMake(l, t, r, b)
+}
+
+
+// clamps the vector to lie within the bbox
+func (bb BB) ClampVect(v Vect) (Vect) {
+  x :=  bb.L.Max(v.X).Min(bb.R) 
+  y :=  bb.B.Max(v.Y).Min(bb.T) 
+  return V(x, y)
+}
+
+// wrap a vector to a bbox
+func (bb BB) WrapVect(v Vect) (Vect) {
+	ix 	:= (bb.R - bb.L).Abs();
+	modx 	:= (v.X  - bb.L).Mod(ix);
+	x 	:= modx
+	if (modx <= 0.0) { 
+	  x 	 = modx + ix
+	}  
+	
+	iy 	:= (bb.T - bb.B).Abs();
+	mody 	:= (v.Y - bb.B).Mod(iy);
+	y 	:= mody
+	if (mody <= 0.0) { 
+	  y 	 = mody + iy
+	}  
+	return V(x + bb.L, y + bb.B);
+}
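+
+// Usage sketch (values are illustrative): keep a point inside the box, or
+// wrap it around as on a toroidal playfield.
+//
+//   box := BBMake(0, 100, 200, 0)        // l, t, r, b
+//   p   := box.ClampVect(V(250, 50))     // -> (200, 50)
+//   q   := box.WrapVect(V(250, 50))      // -> (50, 50)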
+
+func (bb BB) String() (string) {
+  return fmt.Sprintf("[%.3f,%.3f|%.3f,%.3f]", bb.L, bb.T, bb.B, bb.R)  
+}
+
+/*
+func (bb * BB) String() string {
+  var str string
+  fmt.Sprintf(str, "BB [%f %f %f %f]", bb.L, bb.T, bb.R, bb.B) 
+  return str
+}
+*/

+ 284 - 0
src/tamias/body.go

@@ -0,0 +1,284 @@
+package tamias
+
+type BodyVelocityFunc func (body *Body, gravity Vect, damping, dt Float)
+type BodyPositionFunc func (body *Body, dt Float)
+type DataPointer interface{}
+
+/*
+var (
+  BodyUpdateVelocityDefault BodyVelocityFunc = Body.UpdateVelocity
+  BodyUpdatePositionDefault BodyPositionFunc = Body.UpdatePosition
+) 
+*/
+ 
+type Body struct  {
+	// *** Integration Functions
+
+	// Function that is called to integrate the body's velocity. (Defaults to cpBodyUpdateVelocity)
+	velocityFunc BodyVelocityFunc;
+	
+	// Function that is called to integrate the body's position. (Defaults to cpBodyUpdatePosition)
+	positionFunc BodyPositionFunc;
+	
+	// *** Mass Properties
+	
+	// Mass and its inverse.
+	// Always use cpBodySetMass() whenever changing the mass as these values must agree.
+	m, m_inv Float;
+	
+	// Moment of inertia and its inverse.
+	// Always use cpBodySetMoment() whenever changing the moment as these values must agree.
+	i, i_inv Float;
+	
+	// *** Positional Properties
+	
+	// Linear components of motion (position, velocity, and force)
+	p, v, f Vect;
+	
+	// Angular components of motion (angle, angular velocity, and torque)
+	// Always use cpBodySetAngle() to set the angle of the body as a and rot must agree.
+	a, w, t Float;
+	
+	// Cached unit length vector representing the angle of the body.
+	// Used for fast vector rotation using cpvrotate().
+	rot Vect;
+	
+	// *** User Definable Fields
+	
+	// User defined data pointer.
+	data DataPointer;
+	
+	// Maximum velocities this body can move at after integrating velocity
+	v_limit, w_limit Float;
+	
+	// *** Internally Used Fields
+	
+	// Velocity bias values used when solving penetrations and correcting constraints.
+	v_bias Vect;
+	w_bias Float;
+	
+	//	int active;
+}	
+
+func (body * Body) Mass() (Float)  {
+  return body.m
+}
+
+func (body * Body) SetMass(m Float) (Float)  {
+  body.m 	= m
+  body.m_inv	= Float(1.0) / m
+  return body.m
+}
+
+func (body * Body) Moment() (Float)  {
+  return body.i
+}
+
+func (body * Body) SetMoment(i Float) (Float)  {
+  body.i 	= i	
+  body.i_inv 	= Float(1.0) / i
+  return body.i
+}
+
+func (body * Body) Pos() (Vect)  {
+  return body.p
+}
+
+func (body * Body) Vel() (Vect)  {
+  return body.v
+}
+
+func (body * Body) Force() (Vect)  {
+  return body.f
+}
+
+func (body * Body) Angle() (Float)  {
+  return body.a
+}
+
+func (body * Body) SetAngle(a Float) (Float)  {
+  body.a 	= a
+  body.rot	= a.VectForAngle()
+  return body.a
+}
+
+func (body * Body) AngVel() (Float)  {
+  return body.w
+}
+
+func (body * Body) Torque() (Float)  {
+  return body.t
+}
+
+func (body * Body) Rot() (Vect)  {
+  return body.rot
+}
+
+func (body * Body) VelLimit() (Float)  {
+  return body.v_limit
+}
+
+func (body * Body) AngVelLimit() (Float)  {
+  return body.w_limit
+}
+
+func (body * Body) SetVelLimit(v Float) (Float)  {
+  body.v_limit = v
+  return body.v_limit
+}
+
+func (body * Body) SetAngVelLimit(v Float) (Float)  {
+  body.w_limit = v
+  return body.w_limit
+}
+
+// Convert body local to world coordinates
+func (body *Body) Local2World(v Vect) (Vect) {
+  return body.p.Add(v.Rotate(body.rot))
+}
+
+// Convert world to body local coordinates
+func (body *Body) World2Local(v Vect) (Vect) {
+  return v.Sub(body.p).Unrotate(body.rot)
+}
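+
+// Sketch: a point given in body-local coordinates round-trips through world
+// space (values are illustrative).
+//
+//   world := body.Local2World(V(1, 0))  // one unit along the body's local x axis
+//   local := body.World2Local(world)    // recovers (approximately) V(1, 0)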
+
+// Apply an impulse (in world coordinates) to the body at a point relative to 
+// the center of gravity (also in world coordinates).
+func (body *Body) ApplyImpulse(j, r Vect) {
+	body.v  = body.v.Add(j.Mult(body.m_inv))
+	body.w += body.i_inv * r.Cross(j)
+}
+
+// Not intended for external use. Used by cpArbiter.c and cpConstraint.c.
+
+func (body *Body) ApplyBiasImpulse(j, r Vect) {
+	body.v_bias  = body.v_bias.Add(j.Mult(body.m_inv))
+	body.w_bias += body.i_inv * r.Cross(j)
+}
+
+func BodyAlloc() (* Body) {
+  return &Body{}
+}
+
+
+func (body *Body) Init(m Float, i Float) (*Body) {
+	/* Compiler doesn't seem to support these yet, 
+	or maybe I misunderstood the syntax?
+	body.velocityFunc = Body.UpdateVelocity
+	body.positionFunc = Body.UpdatePosition
+	*/
+	
+	body.SetMass(m)
+	body.SetMoment(i)
+
+	body.p = VZERO
+	body.v = VZERO
+	body.f = VZERO
+	
+	body.SetAngle(Float(0.0))
+	body.w = Float(0.0)
+	body.t = Float(0.0)
+	
+	body.v_bias = VZERO
+	body.w_bias = Float(0.0)
+	
+	body.data    = nil
+	body.v_limit = INFINITY
+	body.w_limit = INFINITY
+//	body.active = 1;
+	return body
+}
+
+func BodyNew(m, i Float) (*Body) {
+	return BodyAlloc().Init(m, i)
+}
+
+func (body * Body) Destroy() {
+}
+
+func (body * Body) Free() {
+  if body != nil {	
+    body.Destroy()
+    body = nil
+  }
+}
+
+
+func (body * Body) Slew(pos Vect, dt Float)  {
+  delta := pos.Sub(body.p)
+  body.v = delta.Mult(Float(1.0) / dt)
+}
+
+func (body * Body) UpdateVelocity(gravity Vect, damping, dt Float)  {
+  vdamp := body.v.Mult(damping)  
+  vforc := gravity.Add(body.f.Mult(body.m_inv)).Mult(dt)
+  body.v = vdamp.Add(vforc).Clamp(body.v_limit)
+  w_lim := body.w_limit
+  waid  := body.w*damping + body.t*body.i_inv*dt
+  body.w = waid.Clamp(-w_lim, w_lim)
+}
+
+func (body * Body) UpdatePosition(dt Float)  {
+	body.p = body.p.Add(body.v.Add(body.v_bias).Mult(dt))
+	body.SetAngle(body.a + (body.w + body.w_bias)*dt)
+	body.v_bias = VZERO;
+	body.w_bias = Float(0.0)
+}
+
+func (body * Body) ResetForces() {
+  body.f = VZERO
+  body.t = Float(0.0)
+} 
+
+func (body * Body) ApplyForce(force, r Vect) {
+	body.f  = body.f.Add(force)
+	body.t += r.Cross(force)
+}
+
+func ApplyDampedSpring(a, b *Body, anchr1, anchr2 Vect, rlen, k, dmp, dt Float) {
+	// Calculate the world space anchor coordinates.
+	r1 	:= anchr1.Rotate(a.rot)
+	r2 	:= anchr2.Rotate(b.rot)
+	
+	delta 	:= b.p.Add(r2).Sub(a.p.Add(r1))
+	dist  	:= delta.Length()	
+	n     	:= VZERO
+	if (dist > 0.0) { 
+	  n    = delta.Mult(Float(1.0)/dist) 
+	}  
+	
+	f_spring := (dist - rlen)*k;
+
+	// Calculate the world relative velocities of the anchor points.
+	// This really should be in the impulse solver and can produce problems when using large damping values.
+	v1 	:= a.v.Add(r1.Perp().Mult(a.w))
+	v2 	:= b.v.Add(r2.Perp().Mult(b.w))
+	vrn	:= v2.Sub(v1).Dot(n)
+	f_damp  := vrn * dmp.Min(Float(1.0) / (dt * (a.m_inv + b.m_inv)))
+	
+	// Apply!
+	f 	:= n.Mult(f_spring + f_damp)
+	a.ApplyForce(f		, r1);
+	b.ApplyForce(f.Neg()	, r2);
+}
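+
+// The force applied above is the usual damped spring along the unit axis n:
+// the scalar force is (dist - rlen)*k + vrn*damping, where vrn is the relative
+// velocity of the anchor points along n and the damping coefficient is capped
+// at 1/(dt*(a.m_inv + b.m_inv)) so a single step cannot overshoot.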
+
+
+//int
+//cpBodyMarkLowEnergy(cpBody *body, cpFloat dvsq, int max)
+//{
+//	cpFloat ke = body->m*cpvdot(body->v, body->v);
+//	cpFloat re = body->i*body->w*body->w;
+//	
+//	if(ke + re > body->m*dvsq)
+//		body->active = 1;
+//	else if(body->active)
+//		body->active = (body->active + 1)%(max + 1);
+//	else {
+//		body->v = cpvzero;
+//		body->v_bias = cpvzero;
+//		body->w = 0.0f;
+//		body->w_bias = 0.0f;
+//	}
+//	
+//	return body->active;
+//}

+ 351 - 0
src/tamias/collision.go

@@ -0,0 +1,351 @@
+package tamias
+
+
+type CollisionFunc func (a * Shape, b * Shape, c * Contact) (bool);
+
+
+// Add contact points for circle to circle collisions.
+// Used by several collision tests.
+func circle2circleQuery(p1, p2 Vect, r1, r2 Float, con * Contact) (int) {
+  mindist := r1 + r2;
+  delta   := p2.Sub(p1)
+  distsq  := delta.Lengthsq()
+  if distsq >= mindist*mindist { 
+    return 0
+  }  
+  
+  dist    := distsq.Sqrt()
+  // To avoid singularities, do nothing in the case of dist = 0.
+  nz_dist := dist
+  if dist == 0.0 { 
+    nz_dist = INFINITY
+  }
+  
+  c1 := p1.Add(delta.Mult(Float(0.5) + (r1 - Float(0.5)*mindist) / nz_dist))
+  c2 := delta.Mult(Float(1.0) / nz_dist)
+  con.Init(c1, c2, dist - mindist, 0) 
+  return 1
+}
+
+// Collide circle shapes.
+func circle2circle(circ1, circ2 *CircleShape, arr * Contact) (int) {
+  return circle2circleQuery(circ1.tc, circ2.tc, circ1.r, circ2.r, arr);
+}
+
+// Collide circles to segment shapes.
+func circle2segment(circ *CircleShape, seg *SegmentShape, con *Contact) (int) {
+  // Radius sum
+  rsum  := circ.r + seg.r
+  
+  // Calculate normal distance from segment.
+  dn    := seg.tn.Dot(circ.tc) - seg.ta.Dot(seg.tn)
+  dist  := dn.Abs() - rsum
+  if dist > Float(0.0) { return 0 }
+  
+  // Calculate tangential distance along segment.
+  dt    := - seg.tn.Cross(circ.tc)
+  dtMin := - seg.tn.Cross(seg.ta)
+  dtMax := - seg.tn.Cross(seg.tb)
+  
+  // Decision tree to decide which feature of the segment to collide with.
+  if dt < dtMin {
+    if dt < (dtMin - rsum) {
+      return 0
+    } else {
+      return circle2circleQuery(circ.tc, seg.ta, circ.r, seg.r, con)
+    }
+  } else {
+    if dt < dtMax  {
+      n := seg.tn.Neg();
+      if dn < 0.0 { 
+        n = seg.tn
+      }    
+      p  := circ.tc.Add(n.Mult(circ.r + dist * Float(0.5)))
+      con.Init(p, n, dist, 0)
+      return 1
+    } else {
+      if dt < (dtMax + rsum) {
+        return circle2circleQuery(circ.tc, seg.tb, circ.r, seg.r, con)
+      } else {
+        return 0
+      }
+    }
+  }  
+  return 1
+}
+
+// Helper function for working with contact buffers
+// This used to malloc/realloc memory on the fly but was repurposed.
+func nextContactPoint(arr []Contact, num int) (*Contact, int) {
+  if num < MAX_CONTACTS_PER_ARBITER { 
+    return &arr[num], num + 1
+  }    
+  return &arr[MAX_CONTACTS_PER_ARBITER - 1], num
+}
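+
+// Call sites thread the running contact count through the return value,
+// roughly:
+//
+//   var con *Contact
+//   con, num = nextContactPoint(arr, num)
+//   con.Init(v, n, dist, hash)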
+
+// Find the minimum separating axis for the given poly and axis list.
+func findMSA(poly *PolyShape, axes []PolyShapeAxis, num int) (result int, 
+  min_out Float) {
+  
+  min_index   := 0
+  min         := poly.ShapeValueOnAxis(axes[0].n, axes[0].d)
+  if min > Float(0.0) { 
+    return -1, Float(-1.0)
+  }
+
+  for i:=1; i<num; i++ {
+    dist := poly.ShapeValueOnAxis(axes[i].n, axes[i].d)
+    if dist > Float(0.0) {
+      return -1, Float(-1.0)
+    } else if dist > min {
+      min       = dist
+      min_index = i
+    }
+  }
+    
+  min_out = min
+  return min_index, min_out
+}
+
+// Add contacts for penetrating vertexes.
+func findVerts(arr []Contact, poly1, poly2 *PolyShape, n Vect, dist Float) (int) {
+  num := 0
+  var con *Contact
+  for i:=0; i<poly1.numVerts; i++ {
+    v := poly1.tVerts[i]
+    if poly2.ContainsVertsPartial(v, n.Neg()) { 
+       con, num = nextContactPoint(arr, num)
+       con.Init(v, n, dist, HASH_PAIR(poly1.Shape.hashid, i))
+    }   
+  }
+  
+  for i:=0; i<poly2.numVerts; i++ {
+    v := poly2.tVerts[i]
+    if poly1.ContainsVertsPartial(v, n) { 
+       con, num = nextContactPoint(arr, num)
+       con.Init(v, n, dist, HASH_PAIR(poly2.Shape.hashid, i))
+    }
+  }
+  //  if(!num)
+  //    addContactPoint(arr, &size, &num, cpContactNew(shape1.body.p, n, dist, 0));
+  return num
+}
+
+// Collide poly shapes together.
+func poly2poly(poly1, poly2 *PolyShape, arr []Contact) (int) {  
+  mini1, min1 := findMSA(poly2, poly1.tAxes, poly1.numVerts)
+  if mini1 == -1 { 
+    return 0
+  }  
+  
+  mini2, min2 := findMSA(poly1, poly2.tAxes, poly2.numVerts)
+  if mini2 == -1 { 
+    return 0
+  }   
+  // There is overlap, find the penetrating verts
+  if(min1 > min2) { 
+    return findVerts(arr, poly1, poly2, poly1.tAxes[mini1].n, min1)
+  } else {
+    return findVerts(arr, poly1, poly2, poly2.tAxes[mini2].n.Neg(), min2)
+  }  
+}
+
+// Like cpPolyValueOnAxis(), but for segments.
+func segValueOnAxis(seg * SegmentShape, n Vect, d Float) (Float) {
+  a := n.Dot(seg.ta) - seg.r
+  b := n.Dot(seg.tb) - seg.r
+  return a.Min(b) - d
+}
+
+// Identify vertexes that have penetrated the segment.
+func findPointsBehindSeg(arr [] Contact, num int, seg * SegmentShape, 
+     poly *PolyShape, pDist Float, coef Float) (int) {
+  dta := seg.tn.Cross(seg.ta)
+  dtb := seg.tn.Cross(seg.tb)
+  n   := seg.tn.Mult(coef)
+  for i:=0; i < poly.numVerts ; i++ {
+    v := poly.tVerts[i]
+    if v.Dot(n) < (seg.tn.Dot(seg.ta)* coef + seg.r) {     
+      dt := seg.tn.Cross(v)
+      if dta >= dt && dt >= dtb {
+        var con *Contact
+        con, num = nextContactPoint(arr, num)
+        con.Init(v, n, pDist, HASH_PAIR(poly.Shape.hashid, i))
+      }
+    }
+  }
+  return num
+}
+
+// This one is complicated and gross. Just don't go there... (sic)
+// TODO: Comment me!
+func seg2poly(seg *SegmentShape, poly *PolyShape, arr []Contact) (int) {
+  axes    := poly.tAxes
+  segD    := seg.tn.Dot(seg.ta)
+  minNorm := poly.ShapeValueOnAxis(seg.tn, segD) - seg.r
+  minNeg  := poly.ShapeValueOnAxis(seg.tn.Neg(), -segD) - seg.r
+  
+  if minNeg > Float(0.0) || minNorm > Float(0.0) { 
+    return 0
+  }  
+  
+  // Find mimimum 
+  mini      := 0
+  poly_min  := segValueOnAxis(seg, axes[0].n, axes[0].d)  
+  if poly_min > Float(0.0) { 
+    return 0
+  }  
+  
+  for  i:=0; i < poly.numVerts ; i++ {
+    dist := segValueOnAxis(seg, axes[i].n, axes[i].d);
+    if dist > Float(0.0) {
+      return 0
+    } else if dist > poly_min {
+      poly_min = dist
+      mini = i
+    }
+  }
+  
+  num     := 0
+  poly_n  := axes[mini].n.Neg();
+  va      := seg.ta.Add(poly_n.Mult(seg.r))
+  vb      := seg.tb.Add(poly_n.Mult(seg.r))
+  if poly.ContainsVert(va) { 
+    var con *Contact
+    con, num = nextContactPoint(arr, num)
+    con.Init(va, poly_n, poly_min, HASH_PAIR(seg.Shape.hashid, 0))
+  }
+  
+  if poly.ContainsVert(vb) { 
+    var con *Contact
+    con, num = nextContactPoint(arr, num)
+    con.Init(vb, poly_n, poly_min, HASH_PAIR(seg.Shape.hashid, 1))
+  }   
+  // Floating point precision problems here.
+  // This will have to do for now.
+  poly_min -= COLLISION_SLOP
+  if minNorm >= poly_min || minNeg >= poly_min {
+    if(minNorm > minNeg) {
+      num = findPointsBehindSeg(arr, num, seg, poly, minNorm, Float(1.0));      
+    } else {
+      num = findPointsBehindSeg(arr, num, seg, poly, minNeg, Float(-1.0));
+    }  
+  }
+  
+  // If no other collision points are found, try colliding endpoints.
+  if(num == 0) {
+    poly_a := poly.tVerts[mini]
+    poly_b := poly.tVerts[(mini + 1)%poly.numVerts]
+    
+    if circle2circleQuery(seg.ta, poly_a, seg.r, Float(0.0), &arr[0]) > 0 {
+      return 1
+    }  
+      
+    if circle2circleQuery(seg.tb, poly_a, seg.r, Float(0.0), &arr[0]) > 0 {
+      return 1
+    } 
+      
+    if circle2circleQuery(seg.ta, poly_b, seg.r, Float(0.0), &arr[0]) > 0 {
+      return 1
+    }  
+      
+    if circle2circleQuery(seg.tb, poly_b, seg.r, Float(0.0), &arr[0]) > 0 {
+      return 1
+    }  
+  }
+
+  return num;
+}
+
+// This one is less gross, but still gross. (sic)
+// TODO: Comment me!
+func circle2poly(circ *CircleShape, poly *PolyShape, con *Contact) (int) {
+  axes := poly.tAxes;
+  
+  // find minimum  
+  mini := 0;
+  min  := axes[0].n.Dot(circ.tc) - axes[0].d - circ.r
+  for i:=0; i<poly.numVerts; i++ {
+    dist := axes[i].n.Dot(circ.tc) - axes[i].d - circ.r
+    if dist > 0.0 {
+      return 0
+    } else if dist > min {
+      min   = dist
+      mini  = i
+    }
+  }
+  
+  n   := axes[mini].n;
+  a   := poly.tVerts[mini];
+  b   := poly.tVerts[(mini + 1)%poly.numVerts];
+  dta := n.Cross(a)
+  dtb := n.Cross(b) 
+  dt  := n.Cross(circ.tc)
+  if dt < dtb {
+    return circle2circleQuery(circ.tc, b, circ.r, Float(0.0), con)
+  } else if dt < dta {
+    con.Init(circ.tc.Sub(n.Mult(circ.r + min / Float(2.0))), n.Neg(), min, 0)  
+    return 1;
+  } else {
+    return circle2circleQuery(circ.tc, a, circ.r, Float(0.0), con)
+  }
+}
+
+/*
+TODO: this must probably be done differently in Go...
+
+//static const collisionFunc builtinCollisionFuncs[9] = {
+//  circle2circle,
+//  NULL,
+//  NULL,
+//  circle2segment,
+//  NULL,
+//  NULL,
+//  circle2poly,
+//  seg2poly,
+//  poly2poly,
+//};
+//static const collisionFunc *colfuncs = builtinCollisionFuncs;
+
+static collisionFunc *colfuncs = NULL;
+
+static void
+addColFunc(cpShapeType a, cpShapeType b, collisionFunc func)
+{
+  colfuncs[a + b*CP_NUM_SHAPES] = func;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  void cpInitCollisionFuncs(void);
+  
+  // Initializes the array of collision functions.
+  // Called by cpInitChipmunk().
+  void
+  cpInitCollisionFuncs(void)
+  {
+    if(!colfuncs)
+      colfuncs = (collisionFunc *)cpcalloc(CP_NUM_SHAPES*CP_NUM_SHAPES, sizeof(collisionFunc));
+    
+    addColFunc(CP_CIRCLE_SHAPE,  CP_CIRCLE_SHAPE,  circle2circle);
+    addColFunc(CP_CIRCLE_SHAPE,  CP_SEGMENT_SHAPE, circle2segment);
+    addColFunc(CP_SEGMENT_SHAPE, CP_POLY_SHAPE,    seg2poly);
+    addColFunc(CP_CIRCLE_SHAPE,  CP_POLY_SHAPE,    circle2poly);
+    addColFunc(CP_POLY_SHAPE,    CP_POLY_SHAPE,    poly2poly);
+  } 
+#ifdef __cplusplus
+}
+#endif
+*/
+
+func CollideShapes(a, b *Shape, arr []Contact) (int) {
+  // Their shape types must be in order.
+  Assert(a.ShapeClass.Type <= b.ShapeClass.Type, 
+    "Collision shapes passed to CollideShapes() are not sorted.")
+  /*
+  collisionFunc cfunc = colfuncs[a.klass.type + b.klass.type*CP_NUM_SHAPES];
+  return (cfunc) ? cfunc(a, b, arr) : 0;
+  */
+  // TODO: make this work
+  return 0  
+}
+
+
+

+ 1073 - 0
src/tamias/constraint.go

@@ -0,0 +1,1073 @@
+package tamias
+
+var CONSTRAINT_BIAS_COEF = Float(0.1); 
+
+type Constraint interface {
+  A() (*Body)
+  B() (*Body)
+  Data() (interface{})
+  
+  MaxForce() (Float)
+  BiasCoef() (Float)
+  MaxBias() (Float)
+
+  PreStep(dt, dt_inv Float)
+  ApplyImpulse()
+  GetImpulse() (Float)
+}
+
+
+
+type constraint struct {
+  a, b *Body;
+  maxForce, biasCoef, maxBias Float;  
+  data interface{}
+}
+
+type BasicConstraint constraint
+
+
+
+func (c *  constraint) Bodies() (*Body, *Body) {
+  return c.a, c.b
+}
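+
+// Accessors matching the Constraint interface above; a minimal sketch, the
+// concrete joints still have to provide PreStep/ApplyImpulse/GetImpulse.
+func (c * constraint) A() (*Body) {
+  return c.a
+}
+
+func (c * constraint) B() (*Body) {
+  return c.b
+}
+
+func (c * constraint) Data() (interface{}) {
+  return c.data
+}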
+
+
+func (c *  constraint) MaxForce() (Float) {
+  return c.maxForce
+}
+  
+func (c *  constraint) BiasCoef() (Float) {
+  return c.biasCoef
+}
+
+func (c *  constraint) MaxBias() (Float) {
+  return c.maxBias
+}
+
+func (c * constraint) Init(a, b *Body) (* constraint) {  
+  c.a        = a
+  c.b        = b  
+  c.maxForce = INFINITY
+  c.biasCoef = CONSTRAINT_BIAS_COEF
+  c.maxBias  = INFINITY
+  return c
+}
+
+type SpringConstraint interface {
+  Constraint
+  SpringTorque(Vect) (Float) 
+}
+
+type DampedRotarySpring struct {
+  constraint
+  restAngle , stiffness , damping   Float
+  dt        , target_wrn, iSum      Float
+}
+
+type DampedSpring struct {
+  constraint
+  anchr1, anchr2 Vect
+  restLength, stiffness, damping Float;  
+  dt, target_vrn Float  
+  r1, r2 Vect
+  nMass Float;
+  n Vect;
+}
+
+type GearJoint struct {
+  constraint
+  phase, ratio, ratio_inv, iSum Float
+  bias, jAcc, jMax Float
+} 
+
+type GrooveJoint struct {
+  constraint
+  grv_n, grv_a, grv_b Vect
+  anchr2 Vect  
+  grv_tn Vect
+  clamp Float
+  r1, r2 Vect
+  k1, k2 Vect
+  
+  jAcc Vect
+  jMaxLen Vect
+  bias Vect
+}
+
+
+type PinJoint struct {
+  constraint
+  anchr1, anchr2 Vect
+  dist Float   
+  r1, r2 Vect
+  n Vect
+  nMass Float  
+  jnAcc, jnMax Float
+  bias Float
+}
+
+type PivotJoint struct {
+  constraint
+  anchr1, anchr2 Vect  
+  r1, r2 Vect
+  k1, k2 Vect
+  
+  jAcc Vect
+  jMaxLen Float
+  bias Vect
+}
+
+
+type RatchetJoint struct {
+  constraint
+  angle, phase, ratchet Float  
+  iSum Float
+  bias Float
+  jAcc, jMax Float 
+} 
+
+type RotaryLimitJoint struct {
+  constraint
+  min, max Float;
+  iSum Float;
+  bias Float;
+  jAcc, jMax Float;
+} 
+
+type SimpleMotor struct {
+  constraint
+  rate, iSum, jAcc, jMax Float;
+}
+
+type SlideJoint struct {
+  constraint
+  anchr1, anchr2 Vect;
+  min, max Float;
+  
+  r1, r2 Vect;
+  n Vect;
+  nMass Float;
+  
+  jnAcc, jnMax Float;
+  bias Float;
+} 
+
+
+func (spring * DampedRotarySpring) SpringTorque(relativeAngle Float) (Float) {
+  return (relativeAngle - spring.restAngle)*spring.stiffness;
+}
+
+func (spring * DampedRotarySpring) PreStep(dt, dt_inv Float) {
+  a, b             := spring.Bodies()
+  spring.iSum       = Float(1.0)/(a.i_inv + b.i_inv);
+  spring.dt         = dt;
+  spring.target_wrn = Float(0.0);  
+  // apply spring torque
+  da                := a.a - b.a
+  j_spring          := spring.SpringTorque(da) * dt
+  a.w               -= j_spring * a.i_inv;
+  b.w               += j_spring * b.i_inv;
+}
+
+
+func (spring * DampedRotarySpring) ApplyImpulse() { 
+
+  a, b := spring.Bodies()    
+  // compute relative velocity
+  wrn  := a.w - b.w;
+  
+  //normal_relative_velocity(a, b, r1, r2, n) - spring.target_vrn;
+  
+  // compute velocity loss from drag
+  // not 100% certain this is derived correctly, though it makes sense
+  w_aid  := (-spring.damping*spring.dt/spring.iSum).Exp()
+  w_damp := wrn*(Float(1.0) - w_aid);
+  spring.target_wrn = wrn - w_damp;
+  
+  //apply_impulses(a, b, spring.r1, spring.r2, cpvmult(spring.n, v_damp*spring.nMass));
+  j_damp := w_damp*spring.iSum;
+  a.w   -= j_damp*a.i_inv;
+  b.w   += j_damp*b.i_inv;
+}
+
+func (spring * DampedRotarySpring) GetImpulse() (Float) {
+  return Float(0.0)
+}
+
+
+func DampedRotarySpringAlloc() (* DampedRotarySpring) {
+  return &DampedRotarySpring{}
+}
+
+func (spring * DampedRotarySpring) Init(a, b *Body, restAngle, stiffness,
+      damping Float) (* DampedRotarySpring) {
+       
+  spring.constraint.Init(a, b)  
+  spring.restAngle  = restAngle
+  spring.stiffness  = stiffness
+  spring.damping    = damping  
+  return spring
+}
+
+func DampedRotarySpringNew(a, b *Body, restAngle, stiffness,
+      damping Float) (* DampedRotarySpring) {
+  return DampedRotarySpringAlloc().Init(a, b, restAngle, stiffness, damping)      
+}
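+
+// Usage sketch (the stiffness and damping values are illustrative):
+//
+//   spring := DampedRotarySpringNew(bodyA, bodyB, Float(0.0), Float(500.0), Float(30.0))
+//   // normally driven by the space each step:
+//   spring.PreStep(dt, Float(1.0)/dt)
+//   spring.ApplyImpulse()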
+
+/*
+static cpFloat
+defaultSpringForce(cpDampedSpring *spring, cpFloat dist){
+  return (spring->restLength - dist)*spring->stiffness;
+}
+
+static void
+preStep(cpDampedSpring *spring, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = spring->constraint.a;
+  cpBody *b = spring->constraint.b;
+  
+  spring->r1 = cpvrotate(spring->anchr1, a->rot);
+  spring->r2 = cpvrotate(spring->anchr2, b->rot);
+  
+  cpVect delta = cpvsub(cpvadd(b->p, spring->r2), cpvadd(a->p, spring->r1));
+  cpFloat dist = cpvlength(delta);
+  spring->n = cpvmult(delta, 1.0f/(dist ? dist : INFINITY));
+  
+  // calculate mass normal
+  spring->nMass = 1.0f/k_scalar(a, b, spring->r1, spring->r2, spring->n);
+
+  spring->dt = dt;
+  spring->target_vrn = 0.0f;
+
+  // apply spring force
+  cpFloat f_spring = spring->springForceFunc((cpConstraint *)spring, dist);
+  apply_impulses(a, b, spring->r1, spring->r2, cpvmult(spring->n, f_spring*dt));
+}
+
+static void
+applyImpulse(cpDampedSpring *spring)
+{
+  cpBody *a = spring->constraint.a;
+  cpBody *b = spring->constraint.b;
+  
+  cpVect n = spring->n;
+  cpVect r1 = spring->r1;
+  cpVect r2 = spring->r2;
+
+  // compute relative velocity
+  cpFloat vrn = normal_relative_velocity(a, b, r1, r2, n) - spring->target_vrn;
+  
+  // compute velocity loss from drag
+  // not 100% certain this is derived correctly, though it makes sense
+  cpFloat v_damp = -vrn*(1.0f - cpfexp(-spring->damping*spring->dt/spring->nMass));
+  spring->target_vrn = vrn + v_damp;
+  
+  apply_impulses(a, b, spring->r1, spring->r2, cpvmult(spring->n, v_damp*spring->nMass));
+}
+
+static cpFloat
+getImpulse(cpConstraint *constraint)
+{
+  return 0.0f;
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpDampedSpring)
+
+cpDampedSpring *
+cpDampedSpringAlloc(void)
+{
+  return (cpDampedSpring *)cpmalloc(sizeof(cpDampedSpring));
+}
+
+cpDampedSpring *
+cpDampedSpringInit(cpDampedSpring *spring, cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2, cpFloat restLength, cpFloat stiffness, cpFloat damping)
+{
+  cpConstraintInit((cpConstraint *)spring, cpDampedSpringGetClass(), a, b);
+  
+  spring->anchr1 = anchr1;
+  spring->anchr2 = anchr2;
+  
+  spring->restLength = restLength;
+  spring->stiffness = stiffness;
+  spring->damping = damping;
+  spring->springForceFunc = (cpDampedSpringForceFunc)defaultSpringForce;
+  
+  return spring;
+}
+
+cpConstraint *
+cpDampedSpringNew(cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2, cpFloat restLength, cpFloat stiffness, cpFloat damping)
+{
+  return (cpConstraint *)cpDampedSpringInit(cpDampedSpringAlloc(), a, b, anchr1, anchr2, restLength, stiffness, damping);
+}
+
+static void
+preStep(cpGearJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // calculate moment of inertia coefficient.
+  joint->iSum = 1.0f/(a->i_inv*joint->ratio_inv + joint->ratio*b->i_inv);
+  
+  // calculate bias velocity
+  cpFloat maxBias = joint->constraint.maxBias;
+  joint->bias = cpfclamp(-joint->constraint.biasCoef*dt_inv*(b->a*joint->ratio - a->a - joint->phase), -maxBias, maxBias);
+  
+  // compute max impulse
+  joint->jMax = J_MAX(joint, dt);
+
+  // apply joint torque
+  cpFloat j = joint->jAcc;
+  a->w -= j*a->i_inv*joint->ratio_inv;
+  b->w += j*b->i_inv;
+}
+
+static void
+applyImpulse(cpGearJoint *joint)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // compute relative rotational velocity
+  cpFloat wr = b->w*joint->ratio - a->w;
+  
+  // compute normal impulse 
+  cpFloat j = (joint->bias - wr)*joint->iSum;
+  cpFloat jOld = joint->jAcc;
+  joint->jAcc = cpfclamp(jOld + j, -joint->jMax, joint->jMax);
+  j = joint->jAcc - jOld;
+  
+  // apply impulse
+  a->w -= j*a->i_inv*joint->ratio_inv;
+  b->w += j*b->i_inv;
+}
+
+static cpFloat
+getImpulse(cpGearJoint *joint)
+{
+  return cpfabs(joint->jAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpGearJoint)
+
+cpGearJoint *
+cpGearJointAlloc(void)
+{
+  return (cpGearJoint *)cpmalloc(sizeof(cpGearJoint));
+}
+
+cpGearJoint *
+cpGearJointInit(cpGearJoint *joint, cpBody *a, cpBody *b, cpFloat phase, cpFloat ratio)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->phase = phase;
+  joint->ratio = ratio;
+  joint->ratio_inv = 1.0f/ratio;
+  
+  joint->jAcc = 0.0f;
+  
+  return joint;
+}
+
+cpConstraint *
+cpGearJointNew(cpBody *a, cpBody *b, cpFloat phase, cpFloat ratio)
+{
+  return (cpConstraint *)cpGearJointInit(cpGearJointAlloc(), a, b, phase, ratio);
+}
+
+void
+cpGearJointSetRatio(cpConstraint *constraint, cpFloat value)
+{
+  cpConstraintCheckCast(constraint, cpGearJoint);
+  ((cpGearJoint *)constraint)->ratio = value;
+  ((cpGearJoint *)constraint)->ratio_inv = 1.0f/value;
+}
+
+
+static void
+preStep(cpGrooveJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // calculate endpoints in worldspace
+  cpVect ta = cpBodyLocal2World(a, joint->grv_a);
+  cpVect tb = cpBodyLocal2World(a, joint->grv_b);
+
+  // calculate axis
+  cpVect n = cpvrotate(joint->grv_n, a->rot);
+  cpFloat d = cpvdot(ta, n);
+  
+  joint->grv_tn = n;
+  joint->r2 = cpvrotate(joint->anchr2, b->rot);
+  
+  // calculate tangential distance along the axis of r2
+  cpFloat td = cpvcross(cpvadd(b->p, joint->r2), n);
+  // calculate clamping factor and r2
+  if(td <= cpvcross(ta, n)){
+    joint->clamp = 1.0f;
+    joint->r1 = cpvsub(ta, a->p);
+  } else if(td >= cpvcross(tb, n)){
+    joint->clamp = -1.0f;
+    joint->r1 = cpvsub(tb, a->p);
+  } else {
+    joint->clamp = 0.0f;
+    joint->r1 = cpvsub(cpvadd(cpvmult(cpvperp(n), -td), cpvmult(n, d)), a->p);
+  }
+  
+  // Calculate mass tensor
+  k_tensor(a, b, joint->r1, joint->r2, &joint->k1, &joint->k2); 
+  
+  // compute max impulse
+  joint->jMaxLen = J_MAX(joint, dt);
+  
+  // calculate bias velocity
+  cpVect delta = cpvsub(cpvadd(b->p, joint->r2), cpvadd(a->p, joint->r1));
+  joint->bias = cpvclamp(cpvmult(delta, -joint->constraint.biasCoef*dt_inv), joint->constraint.maxBias);
+  
+  // apply accumulated impulse
+  apply_impulses(a, b, joint->r1, joint->r2, joint->jAcc);
+}
+
+static inline cpVect
+grooveConstrain(cpGrooveJoint *joint, cpVect j){
+  cpVect n = joint->grv_tn;
+  cpVect jClamp = (joint->clamp*cpvcross(j, n) > 0.0f) ? j : cpvproject(j, n);
+  return cpvclamp(jClamp, joint->jMaxLen);
+}
+
+static void
+applyImpulse(cpGrooveJoint *joint)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  cpVect r1 = joint->r1;
+  cpVect r2 = joint->r2;
+  
+  // compute impulse
+  cpVect vr = relative_velocity(a, b, r1, r2);
+
+  cpVect j = mult_k(cpvsub(joint->bias, vr), joint->k1, joint->k2);
+  cpVect jOld = joint->jAcc;
+  joint->jAcc = grooveConstrain(joint, cpvadd(jOld, j));
+  j = cpvsub(joint->jAcc, jOld);
+  
+  // apply impulse
+  apply_impulses(a, b, joint->r1, joint->r2, j);
+}
+
+static cpFloat
+getImpulse(cpGrooveJoint *joint)
+{
+  return cpvlength(joint->jAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpGrooveJoint)
+
+cpGrooveJoint *
+cpGrooveJointAlloc(void)
+{
+  return (cpGrooveJoint *)cpmalloc(sizeof(cpGrooveJoint));
+}
+
+cpGrooveJoint *
+cpGrooveJointInit(cpGrooveJoint *joint, cpBody *a, cpBody *b, cpVect groove_a, cpVect groove_b, cpVect anchr2)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->grv_a = groove_a;
+  joint->grv_b = groove_b;
+  joint->grv_n = cpvperp(cpvnormalize(cpvsub(groove_b, groove_a)));
+  joint->anchr2 = anchr2;
+  
+  joint->jAcc = cpvzero;
+  
+  return joint;
+}
+
+cpConstraint *
+cpGrooveJointNew(cpBody *a, cpBody *b, cpVect groove_a, cpVect groove_b, cpVect anchr2)
+{
+  return (cpConstraint *)cpGrooveJointInit(cpGrooveJointAlloc(), a, b, groove_a, groove_b, anchr2);
+}
+
+static void
+preStep(cpPinJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  joint->r1 = cpvrotate(joint->anchr1, a->rot);
+  joint->r2 = cpvrotate(joint->anchr2, b->rot);
+  
+  cpVect delta = cpvsub(cpvadd(b->p, joint->r2), cpvadd(a->p, joint->r1));
+  cpFloat dist = cpvlength(delta);
+  joint->n = cpvmult(delta, 1.0f/(dist ? dist : (cpFloat)INFINITY));
+  
+  // calculate mass normal
+  joint->nMass = 1.0f/k_scalar(a, b, joint->r1, joint->r2, joint->n);
+  
+  // calculate bias velocity
+  cpFloat maxBias = joint->constraint.maxBias;
+  joint->bias = cpfclamp(-joint->constraint.biasCoef*dt_inv*(dist - joint->dist), -maxBias, maxBias);
+  
+  // compute max impulse
+  joint->jnMax = J_MAX(joint, dt);
+  
+  // apply accumulated impulse
+  cpVect j = cpvmult(joint->n, joint->jnAcc);
+  apply_impulses(a, b, joint->r1, joint->r2, j);
+}
+
+static void
+applyImpulse(cpPinJoint *joint)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  cpVect n = joint->n;
+
+  // compute relative velocity
+  cpFloat vrn = normal_relative_velocity(a, b, joint->r1, joint->r2, n);
+  
+  // compute normal impulse
+  cpFloat jn = (joint->bias - vrn)*joint->nMass;
+  cpFloat jnOld = joint->jnAcc;
+  joint->jnAcc = cpfclamp(jnOld + jn, -joint->jnMax, joint->jnMax);
+  jn = joint->jnAcc - jnOld;
+  
+  // apply impulse
+  apply_impulses(a, b, joint->r1, joint->r2, cpvmult(n, jn));
+}
+
+static cpFloat
+getImpulse(cpPinJoint *joint)
+{
+  return cpfabs(joint->jnAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpPinJoint);
+
+
+cpPinJoint *
+cpPinJointAlloc(void)
+{
+  return (cpPinJoint *)cpmalloc(sizeof(cpPinJoint));
+}
+
+cpPinJoint *
+cpPinJointInit(cpPinJoint *joint, cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->anchr1 = anchr1;
+  joint->anchr2 = anchr2;
+  
+  cpVect p1 = cpvadd(a->p, cpvrotate(anchr1, a->rot));
+  cpVect p2 = cpvadd(b->p, cpvrotate(anchr2, b->rot));
+  joint->dist = cpvlength(cpvsub(p2, p1));
+
+  joint->jnAcc = 0.0f;
+  
+  return joint;
+}
+
+cpConstraint *
+cpPinJointNew(cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2)
+{
+  return (cpConstraint *)cpPinJointInit(cpPinJointAlloc(), a, b, anchr1, anchr2);
+}
+
+static void
+preStep(cpPivotJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  joint->r1 = cpvrotate(joint->anchr1, a->rot);
+  joint->r2 = cpvrotate(joint->anchr2, b->rot);
+  
+  // Calculate mass tensor
+  k_tensor(a, b, joint->r1, joint->r2, &joint->k1, &joint->k2);
+  
+  // compute max impulse
+  joint->jMaxLen = J_MAX(joint, dt);
+  
+  // calculate bias velocity
+  cpVect delta = cpvsub(cpvadd(b->p, joint->r2), cpvadd(a->p, joint->r1));
+  joint->bias = cpvclamp(cpvmult(delta, -joint->constraint.biasCoef*dt_inv), joint->constraint.maxBias);
+  
+  // apply accumulated impulse
+  apply_impulses(a, b, joint->r1, joint->r2, joint->jAcc);
+}
+
+static void
+applyImpulse(cpPivotJoint *joint)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  cpVect r1 = joint->r1;
+  cpVect r2 = joint->r2;
+    
+  // compute relative velocity
+  cpVect vr = relative_velocity(a, b, r1, r2);
+  
+  // compute normal impulse
+  cpVect j = mult_k(cpvsub(joint->bias, vr), joint->k1, joint->k2);
+  cpVect jOld = joint->jAcc;
+  joint->jAcc = cpvclamp(cpvadd(joint->jAcc, j), joint->jMaxLen);
+  j = cpvsub(joint->jAcc, jOld);
+  
+  // apply impulse
+  apply_impulses(a, b, joint->r1, joint->r2, j);
+}
+
+static cpFloat
+getImpulse(cpConstraint *joint)
+{
+  return cpvlength(((cpPivotJoint *)joint)->jAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpPivotJoint)
+
+cpPivotJoint *
+cpPivotJointAlloc(void)
+{
+  return (cpPivotJoint *)cpmalloc(sizeof(cpPivotJoint));
+}
+
+cpPivotJoint *
+cpPivotJointInit(cpPivotJoint *joint, cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->anchr1 = anchr1;
+  joint->anchr2 = anchr2;
+  
+  joint->jAcc = cpvzero;
+  
+  return joint;
+}
+
+cpConstraint *
+cpPivotJointNew2(cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2)
+{
+  return (cpConstraint *)cpPivotJointInit(cpPivotJointAlloc(), a, b, anchr1, anchr2);
+}
+
+cpConstraint *
+cpPivotJointNew(cpBody *a, cpBody *b, cpVect pivot)
+{
+  return cpPivotJointNew2(a, b, cpBodyWorld2Local(a, pivot), cpBodyWorld2Local(b, pivot));
+}
+
+static void
+preStep(cpRatchetJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  cpFloat angle = joint->angle;
+  cpFloat phase = joint->phase;
+  cpFloat ratchet = joint->ratchet;
+  
+  cpFloat delta = b->a - a->a;
+  cpFloat diff = angle - delta;
+  cpFloat pdist = 0.0f;
+  
+  if(diff*ratchet > 0.0f){
+    pdist = diff;
+  } else {
+    joint->angle = cpffloor((delta - phase)/ratchet)*ratchet + phase;
+  }
+  
+  // calculate moment of inertia coefficient.
+  joint->iSum = 1.0f/(a->i_inv + b->i_inv);
+  
+  // calculate bias velocity
+  cpFloat maxBias = joint->constraint.maxBias;
+  joint->bias = cpfclamp(-joint->constraint.biasCoef*dt_inv*pdist, -maxBias, maxBias);
+  
+  // compute max impulse
+  joint->jMax = J_MAX(joint, dt);
+
+  // If the bias is 0, the joint is not at a limit. Reset the impulse.
+  if(!joint->bias)
+    joint->jAcc = 0.0f;
+
+  // apply joint torque
+  a->w -= joint->jAcc*a->i_inv;
+  b->w += joint->jAcc*b->i_inv;
+}
+
+static void
+applyImpulse(cpRatchetJoint *joint)
+{
+  if(!joint->bias) return; // early exit
+
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // compute relative rotational velocity
+  cpFloat wr = b->w - a->w;
+  cpFloat ratchet = joint->ratchet;
+  
+  // compute normal impulse 
+  cpFloat j = -(joint->bias + wr)*joint->iSum;
+  cpFloat jOld = joint->jAcc;
+  joint->jAcc = cpfclamp((jOld + j)*ratchet, 0.0f, joint->jMax*cpfabs(ratchet))/ratchet;
+  j = joint->jAcc - jOld;
+  
+  // apply impulse
+  a->w -= j*a->i_inv;
+  b->w += j*b->i_inv;
+}
+
+static cpFloat
+getImpulse(cpRatchetJoint *joint)
+{
+  return cpfabs(joint->jAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpRatchetJoint)
+
+cpRatchetJoint *
+cpRatchetJointAlloc(void)
+{
+  return (cpRatchetJoint *)cpmalloc(sizeof(cpRatchetJoint));
+}
+
+cpRatchetJoint *
+cpRatchetJointInit(cpRatchetJoint *joint, cpBody *a, cpBody *b, cpFloat phase, cpFloat ratchet)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->angle = 0.0f;
+  joint->phase = phase;
+  joint->ratchet = ratchet;
+  
+  joint->angle = b->a - a->a;
+  
+  return joint;
+}
+
+cpConstraint *
+cpRatchetJointNew(cpBody *a, cpBody *b, cpFloat phase, cpFloat ratchet)
+{
+  return (cpConstraint *)cpRatchetJointInit(cpRatchetJointAlloc(), a, b, phase, ratchet);
+}
+
+static void
+preStep(cpRotaryLimitJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  cpFloat dist = b->a - a->a;
+  cpFloat pdist = 0.0f;
+  if(dist > joint->max) {
+    pdist = joint->max - dist;
+  } else if(dist < joint->min) {
+    pdist = joint->min - dist;
+  }
+  
+  // calculate moment of inertia coefficient.
+  joint->iSum = 1.0f/(a->i_inv + b->i_inv);
+  
+  // calculate bias velocity
+  cpFloat maxBias = joint->constraint.maxBias;
+  joint->bias = cpfclamp(-joint->constraint.biasCoef*dt_inv*(pdist), -maxBias, maxBias);
+  
+  // compute max impulse
+  joint->jMax = J_MAX(joint, dt);
+
+  // If the bias is 0, the joint is not at a limit. Reset the impulse.
+  if(!joint->bias)
+    joint->jAcc = 0.0f;
+
+  // apply joint torque
+  a->w -= joint->jAcc*a->i_inv;
+  b->w += joint->jAcc*b->i_inv;
+}
+
+static void
+applyImpulse(cpRotaryLimitJoint *joint)
+{
+  if(!joint->bias) return; // early exit
+
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // compute relative rotational velocity
+  cpFloat wr = b->w - a->w;
+  
+  // compute normal impulse 
+  cpFloat j = -(joint->bias + wr)*joint->iSum;
+  cpFloat jOld = joint->jAcc;
+  if(joint->bias < 0.0f){
+    joint->jAcc = cpfclamp(jOld + j, 0.0f, joint->jMax);
+  } else {
+    joint->jAcc = cpfclamp(jOld + j, -joint->jMax, 0.0f);
+  }
+  j = joint->jAcc - jOld;
+  
+  // apply impulse
+  a->w -= j*a->i_inv;
+  b->w += j*b->i_inv;
+}
+
+static cpFloat
+getImpulse(cpRotaryLimitJoint *joint)
+{
+  return cpfabs(joint->jAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpRotaryLimitJoint)
+
+cpRotaryLimitJoint *
+cpRotaryLimitJointAlloc(void)
+{
+  return (cpRotaryLimitJoint *)cpmalloc(sizeof(cpRotaryLimitJoint));
+}
+
+cpRotaryLimitJoint *
+cpRotaryLimitJointInit(cpRotaryLimitJoint *joint, cpBody *a, cpBody *b, cpFloat min, cpFloat max)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->min = min;
+  joint->max  = max;
+  
+  joint->jAcc = 0.0f;
+  
+  return joint;
+}
+
+cpConstraint *
+cpRotaryLimitJointNew(cpBody *a, cpBody *b, cpFloat min, cpFloat max)
+{
+  return (cpConstraint *)cpRotaryLimitJointInit(cpRotaryLimitJointAlloc(), a, b, min, max);
+}
+
+static void
+preStep(cpSimpleMotor *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // calculate moment of inertia coefficient.
+  joint->iSum = 1.0f/(a->i_inv + b->i_inv);
+  
+  // compute max impulse
+  joint->jMax = J_MAX(joint, dt);
+
+  // apply joint torque
+  a->w -= joint->jAcc*a->i_inv;
+  b->w += joint->jAcc*b->i_inv;
+}
+
+static void
+applyImpulse(cpSimpleMotor *joint)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  // compute relative rotational velocity
+  cpFloat wr = b->w - a->w + joint->rate;
+  
+  // compute normal impulse 
+  cpFloat j = -wr*joint->iSum;
+  cpFloat jOld = joint->jAcc;
+  joint->jAcc = cpfclamp(jOld + j, -joint->jMax, joint->jMax);
+  j = joint->jAcc - jOld;
+  
+  // apply impulse
+  a->w -= j*a->i_inv;
+  b->w += j*b->i_inv;
+}
+
+static cpFloat
+getImpulse(cpSimpleMotor *joint)
+{
+  return cpfabs(joint->jAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpSimpleMotor)
+
+cpSimpleMotor *
+cpSimpleMotorAlloc(void)
+{
+  return (cpSimpleMotor *)cpmalloc(sizeof(cpSimpleMotor));
+}
+
+cpSimpleMotor *
+cpSimpleMotorInit(cpSimpleMotor *joint, cpBody *a, cpBody *b, cpFloat rate)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->rate = rate;
+  
+  joint->jAcc = 0.0f;
+  
+  return joint;
+}
+
+cpConstraint *
+cpSimpleMotorNew(cpBody *a, cpBody *b, cpFloat rate)
+{
+  return (cpConstraint *)cpSimpleMotorInit(cpSimpleMotorAlloc(), a, b, rate);
+}
+
+static void
+preStep(cpSlideJoint *joint, cpFloat dt, cpFloat dt_inv)
+{
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  joint->r1 = cpvrotate(joint->anchr1, a->rot);
+  joint->r2 = cpvrotate(joint->anchr2, b->rot);
+  
+  cpVect delta = cpvsub(cpvadd(b->p, joint->r2), cpvadd(a->p, joint->r1));
+  cpFloat dist = cpvlength(delta);
+  cpFloat pdist = 0.0f;
+  if(dist > joint->max) {
+    pdist = dist - joint->max;
+  } else if(dist < joint->min) {
+    pdist = joint->min - dist;
+    dist = -dist;
+  }
+  joint->n = cpvmult(delta, 1.0f/(dist ? dist : (cpFloat)INFINITY));
+  
+  // calculate mass normal
+  joint->nMass = 1.0f/k_scalar(a, b, joint->r1, joint->r2, joint->n);
+  
+  // calculate bias velocity
+  cpFloat maxBias = joint->constraint.maxBias;
+  joint->bias = cpfclamp(-joint->constraint.biasCoef*dt_inv*(pdist), -maxBias, maxBias);
+  
+  // compute max impulse
+  joint->jnMax = J_MAX(joint, dt);
+
+  // apply accumulated impulse
+  if(!joint->bias) //{
+    // if bias is 0, then the joint is not at a limit.
+    joint->jnAcc = 0.0f;
+//  } else {
+    cpVect j = cpvmult(joint->n, joint->jnAcc);
+    apply_impulses(a, b, joint->r1, joint->r2, j);
+//  }
+}
+
+static void
+applyImpulse(cpSlideJoint *joint)
+{
+  if(!joint->bias) return;  // early exit
+
+  cpBody *a = joint->constraint.a;
+  cpBody *b = joint->constraint.b;
+  
+  cpVect n = joint->n;
+  cpVect r1 = joint->r1;
+  cpVect r2 = joint->r2;
+    
+  // compute relative velocity
+  cpVect vr = relative_velocity(a, b, r1, r2);
+  cpFloat vrn = cpvdot(vr, n);
+  
+  // compute normal impulse
+  cpFloat jn = (joint->bias - vrn)*joint->nMass;
+  cpFloat jnOld = joint->jnAcc;
+  joint->jnAcc = cpfclamp(jnOld + jn, -joint->jnMax, 0.0f);
+  jn = joint->jnAcc - jnOld;
+  
+  // apply impulse
+  apply_impulses(a, b, joint->r1, joint->r2, cpvmult(n, jn));
+}
+
+static cpFloat
+getImpulse(cpConstraint *joint)
+{
+  return cpfabs(((cpSlideJoint *)joint)->jnAcc);
+}
+
+static const cpConstraintClass klass = {
+  (cpConstraintPreStepFunction)preStep,
+  (cpConstraintApplyImpulseFunction)applyImpulse,
+  (cpConstraintGetImpulseFunction)getImpulse,
+};
+CP_DefineClassGetter(cpSlideJoint)
+
+cpSlideJoint *
+cpSlideJointAlloc(void)
+{
+  return (cpSlideJoint *)cpmalloc(sizeof(cpSlideJoint));
+}
+
+cpSlideJoint *
+cpSlideJointInit(cpSlideJoint *joint, cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2, cpFloat min, cpFloat max)
+{
+  cpConstraintInit((cpConstraint *)joint, &klass, a, b);
+  
+  joint->anchr1 = anchr1;
+  joint->anchr2 = anchr2;
+  joint->min = min;
+  joint->max = max;
+  
+  joint->jnAcc = 0.0f;
+  
+  return joint;
+}
+
+cpConstraint *
+cpSlideJointNew(cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2, cpFloat min, cpFloat max)
+{
+  return (cpConstraint *)cpSlideJointInit(cpSlideJointAlloc(), a, b, anchr1, anchr2, min, max);
+}
+
+
+
+*/

+ 111 - 0
src/tamias/float.go

@@ -0,0 +1,111 @@
+package tamias
+
+//Float, a floating point type for Tamias, with some method sugar.
+
+import "math"
+import "fmt"
+
+type Float float32
+
+func F64Float(in float64) Float {
+	return Float(in)
+}
+
+func (self Float) Float64() float64 {
+	return float64(self)
+}
+
+func (self Float) Eqf(other float64) bool {
+	return self == Float(other)
+}
+
+func (self Float) Equals(other Float) bool {
+	return self == other
+}
+
+func (self Float) Sqrt() Float {
+	return Float(math.Sqrt(self.Float64()))
+}
+
+func (self Float) Exp() Float {
+	return Float(math.Exp(self.Float64()))
+}
+
+func (self Float) Floor() Float {
+	return Float(math.Floor(self.Float64()))
+}
+
+func (a Float) Max(b Float) Float {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func (a Float) Min(b Float) Float {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func (a Float) Mod(b Float) Float {
+	return Float(math.Mod(a.Float64(), b.Float64()))
+}
+
+func (a Float) Abs() Float {
+	if a < 0.0 {
+		return -a
+	}
+	return a
+}
+
+func (a Float) Sin() Float {
+	return Float(math.Sin(a.Float64()))
+}
+
+func (a Float) Cos() Float {
+	return Float(math.Cos(a.Float64()))
+}
+
+func (a Float) Tan() Float {
+	return Float(math.Tan(a.Float64()))
+}
+
+func (a Float) Acos() Float {
+	return Float(math.Acos(a.Float64()))
+}
+
+func (a Float) Asin() Float {
+	return Float(math.Asin(a.Float64()))
+}
+
+func (a Float) Atan() Float {
+	return Float(math.Atan(a.Float64()))
+}
+
+func (x Float) Atan2(y Float) Float {
+	return Float(math.Atan2(y.Float64(), x.Float64()))
+}
+
+func (f Float) Clamp(min Float, max Float) Float {
+	return f.Max(min).Min(max)
+}
+
+func (f1 Float) Lerp(f2 Float, t Float) Float {
+	return (f1 * (1.0 - t)) + (f2 * t)
+}
+
+func (f1 Float) Flerpconst(f2 Float, d Float) Float {
+	return f1 + (f2-f1).Clamp(-d, d)
+}
+
+func (a Float) VectForAngle() Vect {
+	return V(a.Cos(), a.Sin())
+}
+
+var INFINITY = Float(math.Inf(1))
+
+func (self Float) String() string {
+	return fmt.Sprintf("%f", self)
+}
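+
+// Example sketch (not used by the engine): the method sugar lets Float values
+// be chained fluently instead of nesting math.* calls.
+func exampleFloatSugar() Float {
+	speed := Float(12.5)
+	capped := speed.Clamp(0.0, 10.0) // limited to 10.0
+	return capped.Lerp(0.0, 0.25)    // blend a quarter of the way to zero: 7.5
+}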

+ 100 - 0
src/tamias/hashmap.go

@@ -0,0 +1,100 @@
+package tamias
+
+// HashMap is a wrapped hash map, that uses the go map
+// it's an alternative for HashSet
+/*
+type HashValue int64
+
+type HashElement interface {
+  Equals(interface {}) (bool)
+}
+*/
+
+type HashMap struct { 
+  table map[HashValue] HashElement
+}
+
+func HashMapAlloc() (* HashMap) {
+  return &HashMap{}
+} 
+
+func (hash * HashMap) Init(size int) (* HashMap) {
+  hash.table         = make(map[HashValue] HashElement, size)
+  return hash
+  // set.buffers       = nil
+}
+
+func HashMapNew(size int) (* HashMap) {
+  return HashMapAlloc().Init(size)
+} 
+
+func (hash * HashMap) Insert(key HashValue, value HashElement) (HashElement) {
+  hash.table[key] = value
+  return value
+}
+
+// Remove deletes the element stored under key and returns it,
+// or nil if no element was stored there. The ptr argument is kept for
+// signature compatibility with HashSet but is not consulted.
+func (hash * HashMap) Remove(key HashValue, ptr HashElement) (HashElement) {
+  value := hash.table[key]
+  delete(hash.table, key)
+  return value
+}
+
+// Find returns the element stored under key, or nil if there is none.
+// The ptr argument is kept for signature compatibility with HashSet.
+func (hash * HashMap) Find(key HashValue, ptr HashElement) (HashElement) {
+  return hash.table[key]
+}
+
+type HashMapIterFunc func(value, data HashElement) 
+
+func (hash * HashMap) Each(fun HashMapIterFunc, data HashElement) { 
+  for _, value := range hash.table {
+    fun(value, data)
+  }
+}
+
+type HashMapFilterFunc func(value, data HashElement) (bool)
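+
+// Example sketch (not used by the engine): the map-backed variant looks
+// elements up by key alone, so the element's Equals method is never consulted.
+// *Handle (defined in spacehash.go) is just a convenient HashElement here.
+func exampleHashMap() (HashElement) {
+  m := HashMapNew(8)
+  hand := &Handle{}
+  m.Insert(HashValue(7), hand)
+  return m.Find(HashValue(7), hand) // == hand
+}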

+ 298 - 0
src/tamias/hashset.go

@@ -0,0 +1,298 @@
+package tamias
+
+// HashSet uses a chained hashtable implementation.
+// Other than the transformation functions, there is nothing fancy going on.
+type HashValue int64
+
+type HashElement interface {
+  Equals(interface {}) (bool)
+  /*
+  Iter() (*HashElement)
+  Filter() (bool)
+  */
+}
+
+// cpHashSetBin's form the linked lists in the chained hash table.
+type HashSetBin struct {
+  // Pointer to the element.
+  elt HashElement
+  // Hash value of the element.
+  hash HashValue
+  // Next element in the chain.
+  next * HashSetBin
+}
+
+/*
+// Equality function. Returns true if ptr is equal to elt.
+typedef int (*cpHashSetEqlFunc)(void *ptr, void *elt);
+// Used by cpHashSetInsert(). Called to transform the ptr into an element.
+typedef void *(*cpHashSetTransFunc)(void *ptr, void *data);
+// Iterator function for a hashset.
+typedef void (*cpHashSetIterFunc)(void *elt, void *data);
+// Filter function. Returns false if elt should be dropped.
+typedef int (*cpHashSetFilterFunc)(void *elt, void *data);
+*/
+
+type HashSet struct {
+  // Number of elements stored in the table.
+  entries int;
+  // Number of cells in the table.
+  size int;
+  
+  /*
+  cpHashSetEqlFunc eql;
+  cpHashSetTransFunc trans;
+  (*HashElement) Iterate(*HashElement) (bool)
+  */
+  
+  // Default value returned by cpHashSetFind() when no element is found.
+  // Defaults to NULL.
+  default_value HashElement;
+  
+  // The table and recycled bins
+  table       []*HashSetBin
+  pooledbins    *HashSetBin
+  // Will use Go's array for this instead.
+  // cpArray *allocatedBuffers; 
+}
+
+
+
+// Used for resizing hash tables.
+// Values approximately double.
+// http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
+var primes = [30]int{ 5, 13, 23, 47, 97, 193, 389, 769, 1543, 3079,
+  6151, 12289, 24593, 49157, 98317,  196613,  393241,  786433,
+  1572869,  3145739,  6291469,  12582917,  25165843,  50331653,
+  100663319,  201326611,  402653189,  805306457,  1610612741,  0 }
+
+func next_prime(n int) (int) {
+  i := 0
+  for n > primes[i] {
+    i++
+    Assert(primes[i] > 0, 
+    "Tried to resize a hash table to a size greater than 1610612741 O_o"); 
+    // realistically this should never happen
+  }  
+  return primes[i]
+}
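+
+// Example sketch: table sizes always come from the prime list above, so a
+// request for 100 cells rounds up to 193 (97 is already too small).
+func exampleNextPrime() (int) {
+  return next_prime(100) // == 193
+}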
+
+
+func (set * HashSet) Destroy() {
+  set.table       = nil
+  set.pooledbins  = nil
+}
+
+func (set * HashSet) Free() {
+
+}
+
+func HashSetAlloc() (* HashSet) {
+  return &HashSet{}
+} 
+
+func (set * HashSet) Init(size int) (* HashSet) {
+  set.size          = next_prime(size)
+  set.entries       = 0
+  set.default_value = nil
+  set.table         = make([]*HashSetBin, set.size)
+  set.pooledbins    = nil
+  return set
+  // set.buffers       = nil
+}
+
+
+func HashSetNew(size int) (* HashSet) {
+  return HashSetAlloc().Init(size)
+} 
+
+func (set * HashSet) IsFull() bool { 
+  return (set.entries >= set.size);
+}  
+
+func (set * HashSet) Resize() {
+  // Get the next approximate doubled prime.
+  newsize := next_prime(set.size + 1)
+  // Allocate a new table.
+  newtable := make([]*HashSetBin, newsize) 
+  // Iterate over the chains.
+  for  i:=0 ; i < set.size ; i++ {
+    // Rehash the bins into the new table.
+    bin := set.table[i];
+    var next *HashSetBin 
+    var idx int    
+    for bin != nil {
+      next          = bin.next      
+      idx           = int(bin.hash) % newsize;
+      bin.next      = newtable[idx]
+      newtable[idx] = bin; 
+      bin           = next;
+    }
+  }
+  set.table = newtable
+  set.size  = newsize    
+}  
+
+
+func (set * HashSet) recycleBin(bin * HashSetBin) {
+  bin.next        = set.pooledbins
+  set.pooledbins  = bin  
+  bin.elt         = nil
+}  
+  
+func (set * HashSet) getUnusedBin() (* HashSetBin) {    
+  bin := set.pooledbins
+  if bin != nil {
+    set.pooledbins = bin.next
+    return bin
+  } 
+  // Pool is exhausted, make 100 more
+  count   := 100
+  newbins := make([]HashSetBin, count)
+  // push all but the first one, return the first instead
+  for i:=1; i<count; i++ { 
+    set.recycleBin(&newbins[i])
+  }    
+  return &newbins[0] 
+}
+
+func (set *HashSet) hashIndex(hash HashValue) (HashValue) {
+  return hash % HashValue(set.size)
+}
+
+// Find the correct hash bin for this element, or nil if not found.
+func (set *HashSet) findBin(hash HashValue, ptr HashElement) (*HashSetBin) {
+  idx := set.hashIndex(hash)
+  bin := set.table[idx]
+  // Follow the chained elements in the bin until the element is equal
+  for bin != nil && !ptr.Equals(bin.elt) { 
+    bin = bin.next
+  }
+  return bin;
+}
+
+// Find the correct hash bin for this element, or nil if not found.
+// Also returns the bin before the current bin.
+func (set *HashSet) findBinPrev(hash HashValue, ptr HashElement) (
+bin *HashSetBin, prev *HashSetBin) {
+  idx := set.hashIndex(hash)
+  prev = nil
+  bin  = set.table[idx]
+  // Follow the chained elements in the bin until the element is equal
+  for bin != nil && !ptr.Equals(bin.elt) {
+    prev  = bin
+    bin   = bin.next
+  }
+  return bin, prev;
+}
+
+  
+
+func (set * HashSet) Insert(hash HashValue, ptr HashElement) (HashElement) {
+  idx := set.hashIndex(hash)
+  
+  // Find the bin with the matching element.
+  bin := set.findBin(hash, ptr)
+  
+  // Create it if necessary.
+  if bin == nil {
+    bin = set.getUnusedBin()
+    bin.hash = hash
+    bin.elt  = ptr //XXX Transformation not called here. Is it needed?
+    
+    bin.next = set.table[idx]
+    set.table[idx] = bin
+    
+    set.entries++
+    
+    // Resize the set if it's full.
+    if set.IsFull() { 
+      set.Resize()
+    }  
+  } else { 
+  // this is not in Chipmunk original but seems like a bug not to do it
+    bin.elt = ptr
+  }  
+  return bin.elt
+}  
+
+func (set * HashSet) Remove(hash HashValue, ptr HashElement) (HashElement) {
+  bin, prev := set.findBinPrev(hash, ptr)
+  // Remove it if it exists.
+  if bin != nil {
+    // Update the previous bin next pointer to point to the next bin.
+    if prev != nil { 
+      prev.next = bin.next
+    }  
+    set.entries--    
+    return_value := bin.elt
+    set.recycleBin(bin)
+    return return_value
+  }  
+  return nil;
+}
+
+func (set * HashSet) Find(hash HashValue, ptr HashElement) (HashElement) {
+  bin := set.findBin(hash, ptr)
+  if bin != nil {
+    return bin.elt 
+  }
+  return set.default_value
+}
+
+type HashSetIterFunc func(bin, data HashElement) 
+
+func (set * HashSet) Each(fun HashSetIterFunc, data HashElement) { 
+  for i:=0 ; i<set.size ; i++ {
+    bin := set.table[i];
+    for bin != nil {
+      next := bin.next;
+      fun(bin.elt, data);
+      bin = next;
+    }
+  }
+}
+
+type HashSetFilterFunc func(bin, data HashElement) (bool)
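+
+// Example sketch (intKey is an illustrative element type, not part of the
+// engine): any value with an Equals method can be stored and found again.
+type intKey int
+
+func (k intKey) Equals(other interface {}) (bool) {
+  o, ok := other.(intKey)
+  return ok && o == k
+}
+
+func exampleHashSet() (bool) {
+  set := HashSetNew(16)
+  set.Insert(HashValue(42), intKey(42))
+  return set.Find(HashValue(42), intKey(42)) != nil // true
+}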
+
+/* 
+XXX: how to iterate and filter? 
+void
+cpHashSetEach(cpHashSet *set, cpHashSetIterFunc func, void *data)
+{
+  for(int i=0; i<set.size; i++){
+    cpHashSetBin *bin = set.table[i];
+    while(bin){
+      cpHashSetBin *next = bin.next;
+      func(bin.elt, data);
+      bin = next;
+    }
+  }
+}
+
+
+void
+cpHashSetFilter(cpHashSet *set, cpHashSetFilterFunc func, void *data)
+{
+  // Iterate over all the chains.
+  for(int i=0; i<set.size; i++){
+    // The rest works similarly to cpHashSetRemove() above.
+    cpHashSetBin **prev_ptr = &set.table[i];
+    cpHashSetBin *bin = set.table[i];
+    while(bin){
+      cpHashSetBin *next = bin.next;
+      
+      if(func(bin.elt, data)){
+        prev_ptr = &bin.next;
+      } else {
+        (*prev_ptr) = next;
+
+        set.entries--;
+        recycleBin(set, bin);
+      }
+      
+      bin = next;
+    }
+  }
+}
+*/

+ 233 - 0
src/tamias/polyshape.go

@@ -0,0 +1,233 @@
+package tamias
+
+// Axis structure used by cpPolyShape.
+type PolyShapeAxis struct {
+  // normal
+  n Vect
+  // distance from origin
+  d Float
+} 
+
+// Convex polygon shape structure.
+type PolyShape struct {
+  // A PolyShape is a shape
+  * Shape   
+  // Vertex and axis lists.
+  numVerts  int;
+  verts   []Vect;
+  axes    []PolyShapeAxis;
+  
+  // Transformed vertex and axis lists.
+  tVerts  []Vect;
+  tAxes   []PolyShapeAxis;
+}
+
+
+// Returns the minimum distance of the polygon to the axis.
+func (poly * PolyShape) ValueOnAxis(n Vect, d Float) (Float) {
+  verts := poly.tVerts
+  min   := n.Dot(verts[0])  
+ 
+  for  i:=1 ; i< poly.numVerts; i++ { 
+    min = min.Min(n.Dot(verts[i])) 
+  }  
+  
+  return min - d;
+}
+
+func (poly * PolyShape) ContainsVert(v Vect) (bool) {
+  axes := poly.tAxes;
+  for i:=0 ; i<poly.numVerts; i++ {
+    dist := axes[i].n.Dot(v) - axes[i].d;
+    if dist > Float(0.0) { return false; }
+  }  
+  return true
+}
+
+// Same as cpPolyShapeContainsVert() but ignores faces pointing 
+// away from the normal.
+func (poly * PolyShape) ContainsVertPartial(v Vect) (bool) {
+  axes := poly.tAxes;
+  for i:=0 ; i<poly.numVerts; i++ {
+    if axes[i].n.Dot(v) < Float(0.0) { continue; } 
+    dist := axes[i].n.Dot(v) - axes[i].d;
+    if dist > Float(0.0) { return false; } 
+  }  
+  return true
+}
+
+
+func PolyShapeAlloc() (*PolyShape) {
+  return &PolyShape{}
+}
+
+func (poly * PolyShape) TransformVerts(p Vect, rot Vect) {
+  src := poly.verts;
+  dst := poly.tVerts;
+  for i:= 0; i < poly.numVerts; i++ { 
+    dst[i] = p.Add(src[i].Rotate(rot))
+  }  
+}
+
+func (poly * PolyShape) TransformAxes(p Vect, rot Vect) {
+  src := poly.axes
+  dst := poly.tAxes
+  var n Vect
+  for i:= 0; i < poly.numVerts; i++ {
+    n         = src[i].n.Rotate(rot)
+    dst[i].n  = n
+    dst[i].d  = p.Dot(n) + src[i].d
+  }
+}
+
+func (poly * PolyShape) CacheBB(p, rot Vect) (BB) {    
+  var l, b, r, t Float
+  
+  poly.TransformAxes(p, rot)  
+  poly.TransformVerts(p, rot)
+  
+  verts:= poly.tVerts
+  r = verts[0].X
+  l = verts[0].X
+  t = verts[0].Y
+  b = verts[0].Y
+  bb := BBMake(l, b, r, t)
+    
+  // TODO do as part of cpPolyShapeTransformVerts?
+  for i:=1; i < poly.numVerts; i++ {
+    v := verts[i]   
+    bb = bb.Expand(v)
+  }  
+  
+  return bb 
+}
+
+func (poly * PolyShape) Destroy() {
+  poly.axes   = nil
+  poly.tAxes  = nil
+  poly.verts  = nil
+  poly.tVerts = nil
+}  
+
+func (poly * PolyShape) PointQuery(p Vect) (bool) {
+  return poly.Shape.BB.ContainsVect(p) && poly.ContainsVert(p)
+}    
+
+func (poly * PolyShape) SegmentQuery (a, b Vect) (info * SegmentQueryInfo) {
+        
+  axes      := poly.tAxes
+  verts     := poly.tVerts
+  numVerts  := poly.numVerts
+  info       = &SegmentQueryInfo{}
+  
+  var axis PolyShapeAxis  
+  var n, point Vect
+  var an, bn, t, dt, dtMin, dtMax Float  
+    
+  for i:=0 ; i < numVerts; i++ {
+    axis = axes[i]
+    n   = axis.n;
+    an  = a.Dot(n) 
+    if axis.d > an { continue; }
+     
+    bn  = b.Dot(n)
+    t   = (axis.d - an) / (bn - an)     
+    if t < Float(0.0) || Float(1.0) < t { continue; }
+    
+    point = a.Lerp(b, t) 
+    dt    = - n.Cross(point)
+    dtMin = - n.Cross(verts[i])
+    dtMax = - n.Cross(verts[(i+1) % numVerts])
+    if dtMin <= dt && dt <= dtMax {
+      info.shape = poly.Shape;
+      info.t = t;
+      info.n = n;
+      // ??? do we not need return info here ???
+    }
+  }
+  return info
+}
+
+
+var PolyClass = &ShapeClass{ POLY_SHAPE };
+
+// Validate checks that the polygon is wound correctly.
+func PolyShapeValidate(verts []Vect) (bool) {
+  nverts := len(verts)  
+  for i:=0 ; i < nverts; i++ {
+    bi:= (i+1) % nverts;
+    ci:= (i+2) % nverts;
+    a := verts[i];
+    b := verts[bi];
+    c := verts[ci];
+    if b.Sub(a).Cross(c.Sub(b)) > Float(0.0) { 
+      return false 
+    }   
+  }  
+  return true;
+}
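+
+// Example sketch: the vertex order used by BoxInit below validates (every
+// cross product is non-positive); reversing the order would fail.
+func exampleValidWinding() (bool) {
+  box := []Vect{ V(-1, -1), V(-1, 1), V(1, 1), V(1, -1) }
+  return PolyShapeValidate(box) // true
+}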
+
+func (poly * PolyShape) NumVerts() (int) {
+  return poly.numVerts
+}  
+
+func (poly * PolyShape) GetVert(idx int) (Vect) {
+  Assert(0 <= idx && idx < poly.NumVerts(), "Index out of range.")
+  return poly.verts[idx]
+}
+
+func (poly * PolyShape) setUpVerts(verts []Vect, offset Vect) {
+  poly.numVerts = len(verts);
+
+  poly.verts    = make([]Vect, poly.numVerts)
+  poly.tVerts   = make([]Vect, poly.numVerts)
+  poly.axes     = make([]PolyShapeAxis, poly.numVerts)
+  poly.tAxes    = make([]PolyShapeAxis, poly.numVerts)
+  nverts       := len(verts)
+  for i:=0 ; i < nverts ; i++ {
+    a := offset.Add(verts[i])
+    bi:= (i+1) % nverts 
+    b := offset.Add(verts[bi])
+    n := b.Sub(a).Perp().Normalize()
+    poly.verts[i]   = a;
+    poly.axes[i].n  = n;
+    poly.axes[i].d  = n.Dot(a)
+  }
+}
+
+func (poly * PolyShape) Init(body * Body, verts []Vect, 
+  offset Vect) (* PolyShape) { 
+  // Fail if the user attempts to pass a concave poly, or a bad winding.
+  Assert(PolyShapeValidate(verts), 
+    "Polygon is concave or has a reversed winding.")  
+  poly.setUpVerts(verts, offset);  
+  poly.Shape = ShapeNew(PolyClass, body) 
+  bb        := poly.CacheBB(body.p, body.rot)  
+  poly.BB    = &bb 
+  return poly;
+}
+
+func PolyShapeNew(body * Body, verts []Vect, offset Vect) (
+  poly * PolyShape) {
+  return PolyShapeAlloc().Init(body, verts, offset) 
+}  
+
+func (poly * PolyShape) BoxInit(body * Body, width, height Float) (
+      * PolyShape) {
+  hw     := width   / Float(2.0)
+  hh     := height  / Float(2.0)
+  verts  := [4]Vect { V(-hw, -hh), V(-hw, hh), V(hw, hh), V(hw, -hh) }
+  poly.Init(body, (verts[0:len(verts)]), VZERO)
+  return poly
+}  
+
+func BoxShapeNew(body * Body, width, height Float) (poly * PolyShape) {
+  return PolyShapeAlloc().BoxInit(body, width, height) 
+}  
+
+// Unsafe API (chipmunk_unsafe.h)
+func (poly * PolyShape) SetVerts(numVerts int, verts []Vect, offset Vect) {
+  poly.Destroy()
+  poly.setUpVerts(verts, offset);
+}
+

+ 454 - 0
src/tamias/shape.go

@@ -0,0 +1,454 @@
+package tamias
+
+type SegmentQueryInfo struct {
+  // shape that was hit, nil if no collision
+  shape * Shape
+  // Distance along query segment, will always be in the range [0, 1].
+  t Float
+  // normal of hit surface
+  n Vect 
+} 
+
+// Collision type, etc
+type CollisionType int
+type GroupType int
+type LayerType int
+
+// Enumeration of shape types.
+type ShapeType int
+
+const (
+  CIRCLE_SHAPE	= ShapeType(0)
+  SEGMENT_SHAPE	= ShapeType(1)
+  POLY_SHAPE		= ShapeType(2)
+  NUM_SHAPES		= ShapeType(3)
+  NO_GROUP		  = GroupType(0)  
+)  
+
+const (
+  LAYER_0		  = 1 << iota
+  LAYER_1		  = 1 << iota
+  LAYER_2		  = 1 << iota
+  LAYER_3		  = 1 << iota
+  LAYER_4		  = 1 << iota
+  LAYER_5		  = 1 << iota
+  LAYER_6		  = 1 << iota
+  LAYER_7		  = 1 << iota
+  LAYER_8		  = 1 << iota
+  LAYER_9		  = 1 << iota
+  LAYER_10		= 1 << iota
+  LAYER_11		= 1 << iota
+  LAYER_12		= 1 << iota
+  LAYER_13		= 1 << iota
+  LAYER_14    = 1 << iota
+  LAYER_15    = 1 << iota
+  LAYER_16    = 1 << iota 
+  ALL_LAYERS	= -1
+)
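+
+// Example sketch: layers form a bitmask, so a shape meant to collide only on
+// layers 0 and 2 would use the combined mask below; ALL_LAYERS (-1) sets every bit.
+const exampleLayerMask = LayerType(LAYER_0 | LAYER_2)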
+
+
+// Shape class is probably not needed, but is used for now for storing 
+// type info.
+type ShapeClass struct {	
+  Type ShapeType
+}
+
+// Basic shape struct that the others inherit from.
+type Shape struct {
+  // The "class" of a shape as defined above 
+  * ShapeClass
+  // cpBody that the shape is attached to.
+  * Body
+  // Cached BBox for the shape.
+  * BB
+  // Sensors invoke callbacks, but do not generate collisions
+  sensor bool
+  // *** Surface properties.
+  // Coefficient of restitution. (elasticity)
+  e Float;
+  // Coefficient of friction.
+  u Float;
+  // Surface velocity used when solving for friction.
+  surface_v Vect;
+  // *** User Definable Fields
+  // User defined data pointer for the shape.
+  data DataPointer
+  // User defined collision type for the shape.
+  collision_type CollisionType
+  // User defined collision group for the shape.
+  group GroupType
+  // User defined layer bitmask for the shape.
+  layers LayerType
+  // *** Internally Used Fields
+  // Unique id used as the hash value.
+  hashid HashValue
+}
+
+// Circle shape structure.
+type CircleShape struct {
+  * Shape
+  // Center in body space coordinates
+  c Vect
+  // Radius.
+  r Float
+  // Transformed center. (world space coordinates)
+  tc Vect;
+} 
+
+// Segment shape structure.
+type SegmentShape struct {
+  * Shape
+  // Endpoints and normal of the segment. (body space coordinates)
+  a, b, n Vect
+  // Radius of the segment. (Thickness)
+  r Float
+  // Transformed endpoints and normal. (world space coordinates)
+  ta, tb, tn Vect
+} 
+
+// Returns true if a shape was hit, false if not
+func (info *SegmentQueryInfo) Hit() (bool) {
+  return info.shape != nil 
+}
+
+
+func (info *SegmentQueryInfo) HitPoint(start, end Vect) (Vect) {
+	return start.Lerp(end, info.t) 
+}
+
+func (info *SegmentQueryInfo) HitDist(start Vect, end Vect) (Float) {
+	return start.Dist(end) * info.t
+}
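+
+// Example sketch (hypothetical values): a hit at t = 0.5 on a query from
+// (0,0) to (10,0) resolves to the segment midpoint and half its length.
+func exampleQueryInfo() (Vect, Float) {
+	info := &SegmentQueryInfo{t: Float(0.5), n: V(0, 1)}
+	start, end := V(0, 0), V(10, 0)
+	return info.HitPoint(start, end), info.HitDist(start, end) // V(5, 0) and Float(5)
+}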
+
+var ( 
+  SHAPE_ID_COUNTER HashValue = HashValue(0)
+)  
+
+func ResetShapeIdCounter() {
+	SHAPE_ID_COUNTER = HashValue(0)
+}
+
+func (shape * Shape) Init(klass *ShapeClass, body *Body) (*Shape){
+	shape.ShapeClass = klass	
+	shape.hashid 	   = SHAPE_ID_COUNTER
+	SHAPE_ID_COUNTER++	
+	
+	shape.Body 	     = body
+	shape.sensor 	   = false
+	
+	shape.e 	       = Float(0.0)
+	shape.u 	       = Float(0.0)
+	shape.surface_v  = VZERO
+	
+	shape.collision_type 	= 0
+	shape.group 		= NO_GROUP
+	shape.layers 		= ALL_LAYERS
+	shape.data 		  = nil
+	return shape;
+}
+
+func ShapeNew(klass *ShapeClass, body *Body) (*Shape) {
+  return new(Shape).Init(klass, body)
+}
+
+func (shape * Shape) CacheBB(p Vect, rot Vect) (BB) {
+  return BBMake(p.X, p.Y, p.X, p.Y)
+}
+
+func (shape * Shape) GetBB() (*BB) {
+  return shape.BB
+}
+
+func (shape * Shape) Destroy() {
+}
+
+func (shape * Shape) Free() {
+}
+
+/* These are done differently in Go language
+cpBB
+cpShapeCacheBB(cpShape *shape)
+{
+	cpBody *body = shape->body;
+	
+	shape->bb = shape->klass->cacheData(shape, body->p, body->rot);
+	return shape->bb;
+}
+
+int
+cpShapePointQuery(cpShape *shape, cpVect p){
+	return shape->klass->pointQuery(shape, p);
+}
+
+int
+cpShapeSegmentQuery(cpShape *shape, cpVect a, cpVect b, cpSegmentQueryInfo *info){
+	cpSegmentQueryInfo blank = {NULL, 0.0f, cpvzero};
+	(*info) = blank;
+	
+	shape->klass->segmentQuery(shape, a, b, info);
+	return (info->shape != NULL);
+}
+
+
+
+void
+cpSegmentQueryInfoPrint(cpSegmentQueryInfo *info)
+{
+	printf("Segment Query:\n");
+	printf("\tt: %f\n", info->t);
+//	printf("\tdist: %f\n", info->dist);
+//	printf("\tpoint: %s\n", cpvstr(info->point));
+	printf("\tn: %s\n", cpvstr(info->n));
+}
+*/
+
+
+
+
+func CircleShapeAlloc() (* CircleShape) {
+	return &CircleShape{};
+}
+
+ 
+func bbFromCircle(c Vect, r Float) (BB) {
+	return BBMake(c.X-r, c.Y-r, c.X+r, c.Y+r);
+}
+
+func (circle * CircleShape) CacheBB(p Vect, rot Vect) (BB) {
+  circle.tc	=	p.Add(circle.c.Rotate(rot))
+  return bbFromCircle(circle.tc, circle.r)
+}
+
+func (circle * CircleShape) PointQuery(p Vect) (bool) {	
+	return circle.tc.Near(p, circle.r)
+}
+
+
+
+func CircleSegmentQuery(shape *Shape, center Vect, r Float, a, b Vect) (info * SegmentQueryInfo) {
+	// umm... gross I normally frown upon such things (sic)
+	aa := a.Sub(center);
+	bb := b.Sub(center);
+	
+	qa := aa.Dot(aa) - Float(2.0)*aa.Dot(bb) + bb.Dot(bb)
+	qb := Float(-2.0)*aa.Dot(aa) + Float(2.0)*aa.Dot(bb)
+	qc := aa.Dot(aa) - r*r;
+	
+	det:= qb*qb - Float(4.0)*qa*qc
+	info= &SegmentQueryInfo{}	
+	if det >= Float(0.0) {
+		t := (-qb - det.Sqrt())/(Float(2.0)*qa);
+		if Float(0.0) <= t && t <= Float(1.0) { 
+			info.shape = shape
+			info.t = t
+			// the normal points from the circle's center to the hit point
+			info.n = aa.Lerp(bb, t).Normalize()
+		}
+	}
+  return info 
+}
+
+
+func (circle * CircleShape) SegmentQuery(a, b Vect) (info * SegmentQueryInfo) {
+  return CircleSegmentQuery(circle.Shape, circle.c, circle.r, a, b)
+}
+
+var CircleShapeClass *ShapeClass = &ShapeClass{ CIRCLE_SHAPE }
+
+
+func (circle * CircleShape) Init(body * Body, radius Float, offset Vect) (* CircleShape) {
+	circle.c = offset;
+	circle.r = radius;	
+	circle.Shape.Init(CircleShapeClass, body);	
+	return circle;
+}
+
+
+func CircleShapeNew(body * Body, radius Float, offset Vect) (*CircleShape) {
+  return CircleShapeAlloc().Init(body, radius, offset)
+}
+
+func (circle * CircleShape) Radius() (Float) {
+  return circle.r
+}
+
+func (circle * CircleShape) Offset() (Vect) {
+  return circle.c
+}
+
+func SegmentShapeAlloc() (*SegmentShape) {
+  return &SegmentShape{}
+}
+
+func (seg * SegmentShape) CacheBB(p, rot Vect) (BB) {
+  var l, r, s, t Float
+  
+  seg.ta = p.Add(seg.a.Rotate(rot));
+  seg.tb = p.Add(seg.b.Rotate(rot));
+  seg.tn = seg.n.Rotate(rot); // the normal is only rotated, never translated
+    
+  if(seg.ta.X < seg.tb.X){
+    l = seg.ta.X
+    r = seg.tb.X
+  } else {
+    l = seg.tb.X
+    r = seg.ta.X
+  }
+  
+  if(seg.ta.Y < seg.tb.Y){
+    s = seg.ta.Y
+    t = seg.tb.Y
+  } else {
+    s = seg.tb.Y
+    t = seg.ta.Y
+  }
+  
+  rad := seg.r
+  return BBMake(l - rad, s - rad, r + rad, t + rad)
+}
+
+func (seg * SegmentShape) PointQuery(p Vect) (bool) {
+  if !seg.BB.ContainsVect(p) { 
+    return false 
+  } 
+   
+  // Calculate normal distance from segment.
+  dn    := seg.tn.Dot(p) - seg.ta.Dot(seg.tn)
+  dist  := dn.Abs()      - seg.r
+  if dist > 0.0 { 
+    return false 
+  }
+  
+  // Calculate tangential distance along segment.
+  dt    := - seg.tn.Cross(p)
+  dtMin := - seg.tn.Cross(seg.ta)     
+  dtMax := - seg.tn.Cross(seg.tb)
+  
+  // Decision tree to decide which feature of the segment to collide with.
+  if dt <= dtMin {
+    if dt < (dtMin - seg.r) {
+      return false
+    } else {
+      return seg.ta.Sub(p).Lengthsq() < (seg.r * seg.r) 
+    }
+  } else {
+    if dt < dtMax {
+      return true;
+    } else {
+      if dt < (dtMax + seg.r) {
+        return seg.tb.Sub(p).Lengthsq() < (seg.r * seg.r)
+      } else {
+        return false
+      }
+    }
+  }  
+  return true
+}
+
+
+func (seg * SegmentShape) SegmentQuery(a, b Vect) (info * SegmentQueryInfo) {
+  
+  n := seg.tn;
+  // flip n if a is behind the axis
+  if a.Dot(n) < seg.ta.Dot(n) { 
+    n = n.Neg()
+  }  
+  
+  info = &SegmentQueryInfo{}
+  an  := a.Dot(n)
+  bn  := b.Dot(n)
+  d   := seg.ta.Dot(n) + seg.r
+  t   := (d - an)/(bn - an)
+  
+  if Float(0.0) < t && t < Float(1.0) {    
+    point := a.Lerp(b,t)
+    dt    := - seg.tn.Cross(point)
+    dtMin := - seg.tn.Cross(seg.ta)
+    dtMax := - seg.tn.Cross(seg.tb)
+        
+    if(dtMin < dt && dt < dtMax){
+      info.shape = seg.Shape
+      info.t = t;
+      info.n = n;
+      
+      return info; // don't continue on and check endcaps
+    }
+  }
+  
+  if seg.r > 0.0 {
+    info1 := CircleSegmentQuery(seg.Shape, seg.ta, seg.r, a, b)
+    info2 := CircleSegmentQuery(seg.Shape, seg.tb, seg.r, a, b)
+    
+    if info1.Hit() && !info2.Hit() {
+      info = info1
+    } else if info2.Hit() && !info1.Hit() {
+      info = info2
+    } else if info1.Hit() && info2.Hit() {
+      if info1.t < info2.t {
+        info = info1;
+      } else {
+        info = info2;
+      }
+    }
+  }
+  // return empty info
+  return info 
+}
+
+
+var SegmentShapeClass * ShapeClass = &ShapeClass {SEGMENT_SHAPE}
+
+func (seg *SegmentShape) Init(body * Body, a, b Vect, r Float) (*SegmentShape) {
+  seg.a = a
+  seg.b = b
+  seg.n = b.Sub(a).Normalize().Perp()
+  seg.r = r;  
+  seg.Shape.Init(SegmentShapeClass, body)  
+  return seg;
+}
+
+func SegmentShapeNew(body * Body, a, b Vect, r Float) (*SegmentShape) {
+  return SegmentShapeAlloc().Init(body, a, b, r)
+}
+
+func (seg * SegmentShape) A() (Vect) {
+  return seg.a
+}
+
+func (seg * SegmentShape) B() (Vect) {
+  return seg.b
+}
+
+func (seg * SegmentShape) Normal() (Vect) {
+  return seg.n
+}
+
+func (seg * SegmentShape) Radius() (Float) {
+  return seg.r
+}
+
+
+// Unsafe API 
+func (circle * CircleShape) SetRadius(r Float) (Float) {
+  circle.r = r
+  return circle.r
+}
+
+func (circle * CircleShape) SetOffset(o Vect) (Vect) {
+  circle.c = o
+  return circle.c
+}
+
+
+func (seg * SegmentShape) SetEndpoints(a, b Vect) {
+  seg.a = a
+  seg.b = b
+  seg.n = b.Sub(a).Normalize().Perp()
+}
+
+func (seg * SegmentShape) SetRadius(r Float) (Float) {
+  seg.r = r
+  return seg.r
+}
+
+
+

+ 789 - 0
src/tamias/space.go

@@ -0,0 +1,789 @@
+package tamias
+
+
+// Number of frames that contact information should persist.
+var CONTACT_PERSISTENCE = 1
+
+// User collision handler function types.
+type CollisionFunc func(arb * Arbiter, space * Space, data interface{}) (int)
+
+// Structure for holding collision pair function information.
+// Used internally.
+type CollisionHandler struct {
+  a, b CollisionType  
+  begin, preSolve, postSolve, separate CollisionFunc
+  data interface {}
+}
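+
+// Example sketch: every collision callback shares the CollisionFunc signature;
+// a begin handler that accepts all collisions simply returns nonzero.
+func exampleAcceptAll(arb * Arbiter, space * Space, data interface{}) (int) {
+  return 1
+}
+
+var _ CollisionFunc = exampleAcceptAll // it satisfies the CollisionFunc type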
+
+const CP_MAX_CONTACTS_PER_ARBITER = 6
+
+type ContactBufferHeader struct {
+  stamp       int
+  next        * ContactBufferHeader
+  numContacts uint
+} 
+
+type CollisionFuncMap map[HashValue] CollisionFunc
+
+type ContactMap map[HashValue] Contact
+
+type Space struct {
+  // *** User definable fields  
+  // Number of iterations to use in the impulse solver to solve contacts.
+  Iterations int
+  
+  // Number of iterations to use in the impulse solver to solve elastic collisions.
+  ElasticIterations int
+  
+  // Default gravity to supply when integrating rigid body motions.
+  Gravity Vect
+  
+  // Default damping to supply when integrating rigid body motions.
+  Damping Float
+  
+  // *** Internally Used Fields  
+  // When the space is locked, you should not add or remove objects;  
+  locked int
+  
+  // Time stamp. Is incremented on every call to cpSpaceStep().
+  stamp int
+
+  // The static and active shape spatial hashes.
+  staticShapes SpaceHash
+  activeShapes SpaceHash
+  
+  // List of bodies in the system.
+  bodies *Array
+  
+  // List of active arbiters for the impulse solver.
+  arbiters, pooledArbiters *Array; 
+  
+  // Linked list ring of contact buffers.
+  // Head is the current buffer. Tail is the oldest buffer.
+  // The list points in the direction from tail to head.
+  contactBuffersHead, contactBuffersTail *ContactBufferHeader;
+  
+  // List of buffers to be free()ed when destroying the space.
+  // Not needed in Go
+  // cpArray *allocatedBuffers;
+  
+  // Persistent contact set.
+  contactSet ContactMap;
+  
+  // List of constraints in the system.
+  constraints *Array;
+  
+  // Set of collision pair functions.
+  collFuncSet CollisionFuncMap;
+  // Default collision handler.
+  defaultHandler CollisionHandler;
+    
+  postStepCallbacks *HashSet;  
+  
+} 
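+
+// Example sketch: the exported fields above can be tuned directly on an
+// existing space before stepping it.
+func exampleTuneSpace(space * Space) {
+  space.Iterations = 10
+  space.Gravity    = V(0, -100)
+  space.Damping    = Float(0.9)
+}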
+
+/*
+
+type contactSet [2]*Shape;
+
+// Equal function for contactSet.
+func (set * contactSet) Equals(arbi interface {}) bool { 
+  arb, ok := arbi.(*Arbiter)
+  if !ok { return false; }
+  a       := set[0]
+  b       := set[1]
+  if a == arb.private_a && b == arb.private_b { return true }
+  if b == arb.private_a && a == arb.private_b { return true }
+  return false
+}   
+
+// Transformation function for contactSet.
+// Not needed, I think. 
+// contactSetTrans(cpShape **shapes, cpSpace *space)
+func (check * CollisionHandler) Equals(pairi interface {}) bool {
+  pair, ok := pairi.(*CollisionHandler)
+  if !ok { return false; }
+  if pair.a == check.a && pair.b == check.b { return true }
+  if pair.b == check.a && pair.a == check.b { return true }
+  return false  
+}
+
+
+type PostStepFunc func(obj, data interface{})
+
+type postStepCallback struct {
+  fun PostStepFunc
+  obj, data interface{}
+}
+
+func (a * postStepCallback) Equals(bi interface{}) bool { 
+  b, ok := arbi.(*CollisionHandler)
+  if !ok { return false; }
+  return a.obj == b.obj   
+}
+
+// Default collision functions.
+func alwaysCollide(arb * Arbiter, space * Space, data interface{}) (int) { 
+  return 1;
+}
+
+func nothing(arb * Arbiter, space * Space, data interface{}) (int) {
+  return 0;
+}
+
+func (shape * Shape) GetBB() (BB) {
+  return shape.BB
+}  
+
+const CONTACTS_BUFFER_SIZE = 100
+
+type ContactBuffer struct {
+  header ContactBufferHeader;
+  contacts [CONTACTS_BUFFER_SIZE]Contact;
+}
+
+func AllocContactBufferHeader() (*ContactBufferHeader) {
+  return &ContactBufferHeader{}
+}
+
+func (header *ContactBufferHeader) Init(space * Space) (*ContactBufferHeader) {
+  header.stamp        = space.stamp
+  header.next         = space.contactBuffersTail
+  header.numContacts  = 0
+  return header
+}
+
+func (header *ContactBufferHeader) ContactBufferHeaderNew(space * Space) (
+      *ContactBufferHeader) {
+  return AllocContactBufferHeader().Init(space) 
+}  
+
+func SpaceAlloc() (*Space) {
+  return &Space{}  
+}
+
+var DEFAULT_DIM_SIZE            = Float(100.0)
+var DEFAULT_COUNT               = 1000
+var DEFAULT_ITERATIONS          = 10
+var DEFAULT_ELASTIC_ITERATIONS  = 0
+
+
+var defaultHandler = CollisionHandler{ 0, 0, alwaysCollide, alwaysCollide, nothing, nothing, nil};
+
+func (space *Space) Init() (*Space) {
+  space.iterations        = DEFAULT_ITERATIONS
+  space.elasticIterations = DEFAULT_ELASTIC_ITERATIONS
+  space.Gravity           = VZERO
+  space.Damping           = Float(1.0)
+  space.locked            = 0
+  space.stamp             = 0
+  space.staticShapes      = SpaceHashNew(DEFAULT_DIM_SIZE, DEFAULT_COUNT)
+  space.activeShapes      = SpaceHashNew(DEFAULT_DIM_SIZE, DEFAULT_COUNT)
+  space.bodies            = ArrayNew(0)
+  space.arbiters          = ArrayNew(0)
+  space.pooledArbiters    = ArrayNew(0)
+  header                 := ContactBufferHeaderNew(space)
+  space.contactBuffersTail= header
+  space.contactBuffersHead= header
+  header.next             = header 
+  // set up ring buffer in cyclical way
+  space.contactSet        = make(ContactSet)  
+  space.constraints       = ArrayNew(0)
+  space.defaultHandler    = defaultHandler
+  space.collFuncSet       = make(CollisionFuncMap)
+  space.postStepCallbacks = HashSetNew(0)
+  
+  return space
+}  
+
+func (space *Space) New() (*Space) {
+  return SpaceAlloc().Init()
+}
+
+
+func (space * Space) AddCollisionHandler(a, b CollisionType,
+  begin, preSolve, postSolve, separate CollisionFunc, data interface{}) {
+  // Remove any old function so the new one will get added.
+  space.RemoveCollisionHandler(space, a, b)
+  handler = CollisionHandler { a , b, begin, 
+            presolve, postSolve, separate , data } 
+  space.collFuncSet[HASH_PAIR(a, b)] = &handler
+}
+
+func (space * Space)RemoveCollisionHandler(a, b CollisionType) {
+  space.collFuncSet[HASH_PAIR(a, b)] = nil, false
+}
+
+func (space * Space) SetDefaultHandler( a, b CollisionType, 
+  begin, preSolve, postSolve, separate CollisionFunc, data interface{}) {
+  // Remove any old function so the new one will get added.
+  space.RemoveCollisionHandler(space, a, b)
+  handler = CollisionHandler { a , b, begin, 
+            presolve, postSolve, separate , data }             
+  space.defaultHandler = &handler
+}
+
+
+func (space * Space) AssertUnlocked() {
+  Assert(!space.locked,  "This addition/removal cannot be done safely during a call to cpSpaceStep(). Put these calls into a Post Step Callback.")
+}
+  
+func (space * Space) AddShape(shape * Shape) (* Shape) {
+  Assert(shape.body != nil, "Cannot add a shape with a nil body.")
+  old := space.activeShapes.Find(shape, shape.hashid)
+  Assert(old == nil, "Cannot add the same shape more than once")
+  space.AssertUnlocked()
+  shape.CacheBB()
+  space.activeShapes.Insert(shape, shape.hashid, shape.bb)
+  return shape
+}
+  
+func (space * Space) AddStaticShape(shape * Shape) (* Shape) {
+  Assert(shape.body != nil, "Cannot add a static shape with a nil body.")
+  old := space.staticShapes.Find(shape, shape.hashid)
+  Assert(old == nil, "Cannot add the same static shape more than once")
+  space.AssertUnlocked()
+  shape.CacheBB()
+  space.staticShapes.Insert(shape, shape.hashid, shape.bb)
+  return shape
+}
+
+
+func (space * Space) AddBody(body * Body) (* Body) {
+  Assert(!space.bodies.Contains(body), 
+          "Cannot add the same body more than once.")
+  space.Bodies.Push(body)
+  return body
+}
+
+
+func (space * Space) AddConstraint(constraint * Constraint) {
+  Assert(!space.constraints.Contains(constraint), "Cannot add the same constraint more than once.")  
+  space.constraints.Push(constraint);  
+  return constraint;
+}
+
+typedef struct removalContext {
+  cpSpace *space;
+  cpShape *shape;
+} removalContext;
+
+// Hashset filter func to throw away old arbiters.
+static int
+contactSetFilterRemovedShape(cpArbiter *arb, removalContext *context)
+{
+  if(context.shape == arb.private_a || context.shape == arb.private_b){
+    arb.handler.separate(arb, context.space, arb.handler.data);
+    cpArrayPush(context.space.pooledArbiters, arb);
+    return 0;
+  }
+  
+  return 1;
+}
+
+
+func (space * Space) RemoveShape(shape * Shape) {
+  space.AssertUnlocked()    
+  for k, v := range space.contactSet { 
+    if shape == v.private_a || shape == v.private_b {
+      v.handler.separate(v, space, v.handler.data)
+      space.contactSet[k] = nil, false
+      // remove all contacts of this shape 
+    }
+  }  
+  space.activeShapes.Remove(shape, shape.hashid)
+}
+
+func (space * Space) RemoveStaticShape(shape * Shape) {
+  space.AssertUnlocked()
+  for k, v := range space.contactSet { 
+    if shape == v.private_a || shape == v.private_b {
+      v.handler.separate(v, space, v.handler.data)
+      space.contactSet[k] = nil, false
+      // remove all contacts of this shape 
+    }
+  }  
+  space.staticShapes.Remove(shape, shape.hashid)
+}
+
+
+func (space * Space) RemoveBody(body * Body) {
+  space.AssertUnlocked()
+  space.bodies.DeleteObj(body)  
+}
+
+func (space * Space) RemoveConstraint(constraint * Constraint) {
+  space.AssertUnlocked()
+  space.constraints.DeleteObj(constraint)  
+}
+
+func (space * Space) AddPostStepCallback(fun * PostStepFunc, 
+      obj, data interface{}) {
+  callback :=  &postStepCallback{fun, obj, data};
+  space.postStepCallbacks.Insert(callback, HashValue(obj))
+  // BDM: This also sucks
+}
+
+func removeShape(shape * Shape, space * space) {
+   space.RemoveShape(shape)
+}   
+
+func (space * Space) PostStepRemoveShape(shape * shape) {
+  shape.AddPostStepCallback(removeShape, shape, space)
+} 
+
+type pointQueryContext struct {
+  layers  Layers;
+  group   Group;
+  fun     SpacePointQueryFunc;
+  data    interface{};
+}
+
+func pointQueryHelper(point * Vect, shape * Shape, 
+      context * pointQueryContext) (bool)  {
+
+  if shape.group > 0 && context.group == shape.group  { return false } 
+  // no collision is in the same nonzero group
+  if (context.layers & shape.layers) == 0  { return false }
+  // no collision if in different, non-overlapping layers
+  
+  // call the callback if the shape responds true to the point query
+  if shape.PointQuery(*point) {   
+    return context.fun(shape, context.data)
+  } 
+}
+
+func (space * Space) SpacePointQuery(point Vect, layers Layers, 
+  group Group, fun SpacePointQueryFunc, data interface{}) {
+  context := pointQueryContext{layers, group, fun, data};
+  space.activeShapes.PointQuery(point, pointQueryHelper, &context);
+  space.staticShapes.PointQuery(point, pointQueryHelper, &context);
+}
+
+/*
+All this sucks a bit. Better to recode it to be more Go-like
+static void
+rememberLastPointQuery(cpShape *shape, cpShape **outShape)
+{
+  (*outShape) = shape;
+}
+
+cpShape *
+cpSpacePointQueryFirst(cpSpace *space, cpVect point, cpLayers layers, cpGroup group)
+{
+  cpShape *shape = NULL;
+  cpSpacePointQuery(space, point, layers, group, (cpSpacePointQueryFunc)rememberLastPointQuery, &shape);
+  
+  return shape;
+}
+*/
+
+func (space * Space) EachBody() (chan *Body) {
+  out := make(chan * Body)
+  go func() {   
+    for i:=0; i < space.bodies.Size(); i++ {
+      out <- space.bodies.Index(i).(*Body)
+    } 
+    close(out) // close the channel so that callers can range over it
+  }()
+  return out
+} 
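+
+// Example sketch: since the channel is closed after the last body has been
+// sent, the result can be consumed with a plain range loop.
+func exampleEachBody(space * Space) (int) {
+  count := 0
+  for _ = range space.EachBody() {
+    count++
+  }
+  return count
+}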
+
+/*
+typedef struct segQueryContext {
+  cpVect start, end;
+  cpLayers layers;
+  cpGroup group;
+  cpSpaceSegmentQueryFunc func;
+  int anyCollision;
+} segQueryContext;
+
+static cpFloat
+segQueryFunc(segQueryContext *context, cpShape *shape, void *data)
+{
+  cpSegmentQueryInfo info;
+  
+  if(
+    !(shape.group && context.group == shape.group) && (context.layers&shape.layers) &&
+    cpShapeSegmentQuery(shape, context.start, context.end, &info)
+  ){
+    if(context.func){
+      context.func(shape, info.t, info.n, data);
+    }
+    
+    context.anyCollision = 1;
+  }
+  
+  return 1.0f;
+}
+
+int
+cpSpaceSegmentQuery(cpSpace *space, cpVect start, cpVect end, cpLayers layers, cpGroup group, cpSpaceSegmentQueryFunc func, void *data)
+{
+  segQueryContext context = {
+    start, end,
+    layers, group,
+    func,
+    0,
+  };
+  
+  cpSpaceHashSegmentQuery(space.staticShapes, &context, start, end, 1.0f, (cpSpaceHashSegmentQueryFunc)segQueryFunc, data);
+  cpSpaceHashSegmentQuery(space.activeShapes, &context, start, end, 1.0f, (cpSpaceHashSegmentQueryFunc)segQueryFunc, data);
+  
+  return context.anyCollision;
+}
+
+typedef struct segQueryFirstContext {
+  cpVect start, end;
+  cpLayers layers;
+  cpGroup group;
+} segQueryFirstContext;
+
+static cpFloat
+segQueryFirst(segQueryFirstContext *context, cpShape *shape, cpSegmentQueryInfo *out)
+{
+  cpSegmentQueryInfo info;// = {NULL, 1.0f, cpvzero};
+  if(
+    !(shape.group && context.group == shape.group) && (context.layers&shape.layers) &&
+    cpShapeSegmentQuery(shape, context.start, context.end, &info)
+  ){
+    if(info.t < out.t){
+      out.shape = info.shape;
+      out.t = info.t;
+      out.n = info.n;
+    }
+    
+    return info.t;
+  }
+  
+  return 1.0f;
+}
+
+cpShape *
+cpSpaceSegmentQueryFirst(cpSpace *space, cpVect start, cpVect end, cpLayers layers, cpGroup group, cpSegmentQueryInfo *out)
+{
+  cpSegmentQueryInfo info = {NULL, 1.0f, cpvzero};
+  if(out){
+    (*out) = info;
+  } else {
+    out = &info;
+  }
+  
+  out.t = 1.0f;
+  
+  segQueryFirstContext context = {
+    start, end,
+    layers, group
+  };
+  
+  cpSpaceHashSegmentQuery(space.staticShapes, &context, start, end, 1.0f, (cpSpaceHashSegmentQueryFunc)segQueryFirst, out);
+  cpSpaceHashSegmentQuery(space.activeShapes, &context, start, end, out.t, (cpSpaceHashSegmentQueryFunc)segQueryFirst, out);
+  
+  return out.shape;
+}
+
+#pragma mark BB Query functions
+
+typedef struct bbQueryContext {
+  cpLayers layers;
+  cpGroup group;
+  cpSpaceBBQueryFunc func;
+  void *data;
+} bbQueryContext;
+
+static void 
+bbQueryHelper(cpBB *bb, cpShape *shape, bbQueryContext *context)
+{
+  if(
+    !(shape.group && context.group == shape.group) && (context.layers&shape.layers) &&
+    cpBBintersects(*bb, shape.bb)
+  ){
+    context.func(shape, context.data);
+  }
+}
+
+void
+cpSpaceBBQuery(cpSpace *space, cpBB bb, cpLayers layers, cpGroup group, cpSpaceBBQueryFunc func, void *data)
+{
+  bbQueryContext context = {layers, group, func, data};
+  cpSpaceHashQuery(space.activeShapes, &bb, bb, (cpSpaceHashQueryFunc)bbQueryHelper, &context);
+  cpSpaceHashQuery(space.staticShapes, &bb, bb, (cpSpaceHashQueryFunc)bbQueryHelper, &context);
+}
+
+#pragma mark Spatial Hash Management
+
+// Iterator function used for updating shape BBoxes.
+static void
+updateBBCache(cpShape *shape, void *unused)
+{
+  cpShapeCacheBB(shape);
+}
+
+void
+cpSpaceResizeStaticHash(cpSpace *space, cpFloat dim, int count)
+{
+  cpSpaceHashResize(space.staticShapes, dim, count);
+  cpSpaceHashRehash(space.staticShapes);
+}
+
+void
+cpSpaceResizeActiveHash(cpSpace *space, cpFloat dim, int count)
+{
+  cpSpaceHashResize(space.activeShapes, dim, count);
+}
+
+void 
+cpSpaceRehashStatic(cpSpace *space)
+{
+  cpSpaceHashEach(space.staticShapes, (cpSpaceHashIterator)&updateBBCache, NULL);
+  cpSpaceHashRehash(space.staticShapes);
+}
+
+#pragma mark Collision Detection Functions
+
+static cpContactBufferHeader *
+cpSpaceGetFreeContactBuffer(cpSpace *space)
+{
+  if(space.stamp - space.contactBuffersTail.stamp > cp_contact_persistence){
+    cpContactBufferHeader *header = space.contactBuffersTail;
+    space.contactBuffersTail = header.next;
+    
+    return cpContactBufferHeaderInit(header, space);
+  } else {
+    cpContactBufferHeader *header = cpSpaceAllocContactBuffer(space);
+    return cpContactBufferHeaderInit(header, space);
+  }
+}
+
+static void
+cpSpacePushNewContactBuffer(cpSpace *space)
+{
+//  for(cpContactBuffer *buffer = space.contactBuffersTail; buffer != space.contactBuffersHead; buffer = buffer.next){
+//    printf("%p . ", buffer);
+//  }
+//  printf("%p (head)\n", space.contactBuffersHead);
+  
+  cpContactBufferHeader *buffer = cpSpaceGetFreeContactBuffer(space);
+  space.contactBuffersHead.next = buffer;
+  space.contactBuffersHead = buffer;
+}
+
+static inline int
+queryReject(cpShape *a, cpShape *b)
+{
+  return
+    // BBoxes must overlap
+    !cpBBintersects(a.bb, b.bb)
+    // Don't collide shapes attached to the same body.
+    || a.body == b.body
+    // Don't collide objects in the same non-zero group
+    || (a.group && b.group && a.group == b.group)
+    // Don't collide objects that don't share at least on layer.
+    || !(a.layers & b.layers);
+}
+
+// Callback from the spatial hash.
+static void
+queryFunc(cpShape *a, cpShape *b, cpSpace *space)
+{
+  // Reject any of the simple cases
+  if(queryReject(a,b)) return;
+  
+  // Find the collision pair function for the shapes.
+  struct{cpCollisionType a, b;} ids = {a.collision_type, b.collision_type};
+  cpHashValue collHashID = CP_HASH_PAIR(a.collision_type, b.collision_type);
+  cpCollisionHandler *handler = (cpCollisionHandler *)cpHashSetFind(space.collFuncSet, collHashID, &ids);
+  
+  int sensor = a.sensor || b.sensor;
+  if(sensor && handler == &space.defaultHandler) return;
+  
+  // Shape 'a' should have the lower shape type. (required by cpCollideShapes() )
+  if(a.klass.type > b.klass.type){
+    cpShape *temp = a;
+    a = b;
+    b = temp;
+  }
+  
+  if(space.contactBuffersHead.numContacts + CP_MAX_CONTACTS_PER_ARBITER > CP_CONTACTS_BUFFER_SIZE){
+    // contact buffer could overflow on the next collision, push a fresh one.
+    cpSpacePushNewContactBuffer(space);
+  }
+  
+  // Narrow-phase collision detection.
+  cpContact *contacts = ((cpContactBuffer *)(space.contactBuffersHead)).contacts + space.contactBuffersHead.numContacts;
+  int numContacts = cpCollideShapes(a, b, contacts);
+  if(!numContacts) return; // Shapes are not colliding.
+  space.contactBuffersHead.numContacts += numContacts;
+  
+  // Get an arbiter from space.contactSet for the two shapes.
+  // This is where the persistant contact magic comes from.
+  cpShape *shape_pair[] = {a, b};
+  cpHashValue arbHashID = CP_HASH_PAIR((size_t)a, (size_t)b);
+  cpArbiter *arb = (cpArbiter *)cpHashSetInsert(space.contactSet, arbHashID, shape_pair, space);
+  cpArbiterUpdate(arb, contacts, numContacts, handler, a, b); // retains the contacts array
+  
+  // Call the begin function first if it's the first step
+  if(arb.stamp == -1 && !handler.begin(arb, space, handler.data)){
+    cpArbiterIgnore(arb); // permanently ignore the collision until separation
+  }
+  
+  if(
+    // Ignore the arbiter if it has been flagged
+    (arb.state != cpArbiterStateIgnore) && 
+    // Call preSolve
+    handler.preSolve(arb, space, handler.data) &&
+    // Process, but don't add collisions for sensors.
+    !sensor
+  ){
+    cpArrayPush(space.arbiters, arb);
+  } else {
+//    cpfree(arb.contacts);
+    space.contactBuffersHead.numContacts -= numContacts;
+    arb.contacts = NULL;
+    arb.numContacts = 0;
+  }
+  
+  // Time stamp the arbiter so we know it was used recently.
+  arb.stamp = space.stamp;
+}
+
+// Iterator for active/static hash collisions.
+static void
+active2staticIter(cpShape *shape, cpSpace *space)
+{
+  cpSpaceHashQuery(space.staticShapes, shape, shape.bb, (cpSpaceHashQueryFunc)queryFunc, space);
+}
+
+// Hashset filter func to throw away old arbiters.
+static int
+contactSetFilter(cpArbiter *arb, cpSpace *space)
+{
+  int ticks = space.stamp - arb.stamp;
+  
+  // was used last frame, but not this one
+  if(ticks == 1){
+    arb.handler.separate(arb, space, arb.handler.data);
+    arb.stamp = -1; // mark it as a new pair again.
+  }
+  
+  if(ticks >= cp_contact_persistence){
+    cpArrayPush(space.pooledArbiters, arb);
+    return 0;
+  }
+  
+  return 1;
+}
+
+// Hashset filter func to call and throw away post step callbacks.
+static int
+postStepCallbackSetFilter(postStepCallback *callback, cpSpace *space)
+{
+  callback.func(space, callback.obj, callback.data);
+  cpfree(callback);
+  
+  return 0;
+}
+
+#pragma mark All Important cpSpaceStep() Function
+
+void
+cpSpaceStep(cpSpace *space, cpFloat dt)
+{
+  if(!dt) return; // don't step if the timestep is 0!
+  
+  cpFloat dt_inv = 1.0f/dt;
+
+  cpArray *bodies = space.bodies;
+  cpArray *constraints = space.constraints;
+  
+  space.locked = 1;
+  
+  // Empty the arbiter list.
+  space.arbiters.num = 0;
+
+  // Integrate positions.
+  for(int i=0; i<bodies.num; i++){
+    cpBody *body = (cpBody *)bodies.arr[i];
+    body.position_func(body, dt);
+  }
+  
+  // Pre-cache BBoxes and shape data.
+  cpSpaceHashEach(space.activeShapes, (cpSpaceHashIterator)updateBBCache, NULL);
+  
+  // Collide!
+  cpSpacePushNewContactBuffer(space);
+  cpSpaceHashEach(space.activeShapes, (cpSpaceHashIterator)active2staticIter, space);
+  cpSpaceHashQueryRehash(space.activeShapes, (cpSpaceHashQueryFunc)queryFunc, space);
+  
+  // Clear out old cached arbiters and dispatch untouch functions
+  cpHashSetFilter(space.contactSet, (cpHashSetFilterFunc)contactSetFilter, space);
+
+  // Prestep the arbiters.
+  cpArray *arbiters = space.arbiters;
+  for(int i=0; i<arbiters.num; i++)
+    cpArbiterPreStep((cpArbiter *)arbiters.arr[i], dt_inv);
+
+  // Prestep the constraints.
+  for(int i=0; i<constraints.num; i++){
+    cpConstraint *constraint = (cpConstraint *)constraints.arr[i];
+    constraint.klass.preStep(constraint, dt, dt_inv);
+  }
+
+  for(int i=0; i<space.elasticIterations; i++){
+    for(int j=0; j<arbiters.num; j++)
+      cpArbiterApplyImpulse((cpArbiter *)arbiters.arr[j], 1.0f);
+      
+    for(int j=0; j<constraints.num; j++){
+      cpConstraint *constraint = (cpConstraint *)constraints.arr[j];
+      constraint.klass.applyImpulse(constraint);
+    }
+  }
+
+  // Integrate velocities.
+  cpFloat damping = cpfpow(1.0f/space.damping, -dt);
+  for(int i=0; i<bodies.num; i++){
+    cpBody *body = (cpBody *)bodies.arr[i];
+    body.velocity_func(body, space.gravity, damping, dt);
+  }
+
+  for(int i=0; i<arbiters.num; i++)
+    cpArbiterApplyCachedImpulse((cpArbiter *)arbiters.arr[i]);
+  
+  // run the old-style elastic solver if elastic iterations are disabled
+  cpFloat elasticCoef = (space.elasticIterations ? 0.0f : 1.0f);
+  
+  // Run the impulse solver.
+  for(int i=0; i<space.iterations; i++){
+    for(int j=0; j<arbiters.num; j++)
+      cpArbiterApplyImpulse((cpArbiter *)arbiters.arr[j], elasticCoef);
+      
+    for(int j=0; j<constraints.num; j++){
+      cpConstraint *constraint = (cpConstraint *)constraints.arr[j];
+      constraint.klass.applyImpulse(constraint);
+    }
+  }
+  
+  space.locked = 0;
+  
+  // run the post solve callbacks
+  for(int i=0; i<arbiters.num; i++){
+    cpArbiter *arb = arbiters.arr[i];
+    
+    cpCollisionHandler *handler = arb.handler;
+    handler.postSolve(arb, space, handler.data);
+    
+    arb.state = cpArbiterStateNormal;
+  }
+  
+  // Run the post step callbacks
+  // Use filter as an easy way to clear out the queue as it runs
+  cpHashSetFilter(space.postStepCallbacks, (cpHashSetFilterFunc)postStepCallbackSetFilter, space);
+  
+//  cpFloat dvsq = cpvdot(space.gravity, space.gravity);
+//  dvsq *= dt*dt * space.damping*space.damping;
+//  for(int i=0; i<bodies.num; i++)
+//    cpBodyMarkLowEnergy(bodies.arr[i], dvsq, space.sleepTicks);
+  
+  // Increment the stamp.
+  space.stamp++;
+}
+*/

+ 514 - 0
src/tamias/spacehash.go

@@ -0,0 +1,514 @@
+package tamias
+// import "container/vector"
+
+
+// The spatial hash is Chipmunk's default (and currently only) spatial 
+// index type.
+// Based on a chained hash table.
+
+type SpaceHashElement interface  {
+  HashElement
+  GetBB()(BB)
+}
+
+
+// Used internally to track objects added to the hash
+type Handle struct {
+  // Pointer to the object
+  obj SpaceHashElement
+  // Retain count
+  retain int
+  // Query stamp. Used to make sure two objects
+  // aren't identified twice in the same query.
+  stamp int
+}
+
+// Linked list element for in the chains.
+type SpaceHashBin struct {
+  handle * Handle
+  next * SpaceHashBin
+} 
+
+
+type SpaceHash struct {
+  // Number of cells in the table.
+  numcells int
+  // Dimensions of the cells.
+  celldim Float
+  
+  // Hashset of the handles and the recycled ones.
+  handleSet     *HashSet
+  pooledHandles *Array
+  
+  // The table and the recycled bins.
+  table         []*SpaceHashBin
+  pooledBins    *SpaceHashBin
+  
+  allocatedBuffers *Array
+    
+  // Incremented on each query. See cpHandle.stamp.
+  stamp int
+} 
+
+func (hand *Handle) Init(obj SpaceHashElement) (*Handle) { 
+  hand.obj    = obj
+  hand.retain = 0
+  hand.stamp  = 0
+  return hand
+}
+
+// Equality function for the handleset.
+func (hand *Handle) Equals(el interface {}) (bool) {
+  other, ok := el.(*Handle)
+  if !ok { return false; }
+  return hand == other
+}
+
+// Equality function for the SpaceHash
+func (hash *SpaceHash) Equals(el interface {}) (bool) {  
+  other, ok := el.(*SpaceHash)  
+  if !ok { return false; }
+  return hash == other
+}
+
+
+func (hand *Handle) Retain() { 
+  hand.retain++
+}
+
+
+func (hand *Handle) Release(pooledHandles * Array) {
+  hand.retain--
+  if hand.retain < 1 { 
+    pooledHandles.Push(hand)
+  }    
+}
+
+
+func SpaceHashAlloc() (*SpaceHash) {
+  return &SpaceHash{}
+}
+
+func (hash * SpaceHash) AllocTable(numcells int) {
+  hash.numcells = numcells
+  hash.table = make([]*SpaceHashBin, numcells)
+} 
+
+
+// Transformation function for the handleset.
+/*
+func (handle *Handle) handleSetTrans (hash * SpaceHash) (*Handle) {
+  if(hash.pooledHandles.num == 0){
+    // handle pool is exhausted, make more    
+    count 	:= 100    
+    buffer 	:= make([]Handle, count)
+    for i:=0; i<count; i++ { 
+      hash.pooledHandles.Push(buffer[i])
+    }  
+  }
+  
+  hand := handle.Init(hash.pooledHandles.Pop(), obj)
+  hand.Retain()
+  
+  return hand
+}
+*/
+
+func (hash *SpaceHash) Init(celldim Float, numcells int) (* SpaceHash) {
+  hash.AllocTable(next_prime(numcells))  
+  
+  hash.celldim          = celldim   
+  hash.handleSet        = HashSetNew(0) 
+  hash.pooledHandles 	  = ArrayNew(0)    
+  hash.pooledBins    	  = nil
+  hash.allocatedBuffers = ArrayNew(0)  
+  hash.stamp 		        = 1  
+  
+  return hash
+}
+
+func SpaceHashNew(celldim Float, numcells int) (* SpaceHash) { 
+  return SpaceHashAlloc().Init(celldim, numcells)
+}
+
+func (hash * SpaceHash) recycleBin(bin *SpaceHashBin) {
+  bin.next 	      = hash.pooledBins
+  hash.pooledBins = bin
+}
+
+func (hash * SpaceHash) clearHashCell(idx int) {
+  bin := hash.table[idx]
+  for bin != nil { 
+    next := bin.next
+    bin.handle.Release(hash.pooledHandles)
+    hash.recycleBin(bin)
+    bin   = next
+  }
+  hash.table[idx] = nil
+}
+
+// Clear all cells in the hashtable.
+func (hash * SpaceHash) clearHash() { 
+  for i:=0; i<hash.numcells; i++ {  
+    hash.clearHashCell(i)
+  }  
+}
+
+func (hash * SpaceHash) Destroy() { 
+  hash.clearHash()
+  hash.handleSet.Free()
+  
+  hash.allocatedBuffers = nil
+  hash.pooledHandles	= nil  
+  hash.table		= nil
+}
+
+func (hash * SpaceHash) Free() { 
+  hash.Destroy()  
+}  
+
+
+func (hash * SpaceHash) Resize(celldim Float, numcells int) { 
+  // Clear the hash to release the old handle locks.
+  hash.clearHash()  
+  hash.celldim = celldim
+  hash.AllocTable(next_prime(numcells))
+}
+
+// Return true if the chain contains the handle.
+func (bin *SpaceHashBin) containsHandle(hand *Handle) (bool) {
+  for bin != nil {
+    if (bin.handle == hand) { return true }
+    bin = bin.next
+  }  
+  return false
+}
+
+// Get a recycled or new bin.
+func (hash * SpaceHash) getEmptyBin() (* SpaceHashBin) {
+  bin := hash.pooledBins
+  
+  if bin != nil {
+    hash.pooledBins = bin.next
+    return bin
+  } 
+  // Pool is exhausted, make more
+  count  := 100
+  buffer := make([]SpaceHashBin, count)
+  hash.allocatedBuffers.Push(buffer)
+  
+  // push all but the first one, return the first instead
+  for  i:=1; i<count; i++ { 
+    hash.recycleBin(&buffer[i])
+  }  
+  return &buffer[0] 
+}
+
+// The hash function itself.
+func hash_func(x, y, n HashValue) (HashValue) {
+  return (x*1640531513 ^ y*2654435789) % n
+}
+
+// Much faster than (int)floor(f)
+// Profiling showed floor() to be a sizable performance hog (in C)
+// XXX: check this for golang.
+func (f Float) floor_int() (int) {
+  i := int(f)
+  if f < Float(0.0) && f != Float(i) { 
+    return i - 1
+  }
+  return i 
+}
+
+func (hash * SpaceHash) cellDimensions(bb BB) (int, int, int, int) {
+  // Find the dimensions in cell coordinates.
+  dim 	:= hash.celldim
+  // Fix by ShiftZ
+  l := (bb.L / dim).floor_int()
+  r := (bb.R / dim).floor_int()
+  b := (bb.B / dim).floor_int()
+  t := (bb.T / dim).floor_int()
+  return l, r, b, t
+}
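+
+// Example sketch: with a cell size of 10, a box spanning -5..5 on both axes
+// covers cells -1..0 in each direction; floor_int keeps negative coordinates
+// from collapsing into cell 0.
+func exampleCellDimensions() (int, int, int, int) {
+  hash := SpaceHashNew(Float(10.0), 999)
+  return hash.cellDimensions(BBMake(-5, -5, 5, 5)) // -1, 0, -1, 0
+}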
+
+
+func (hash * SpaceHash) hashHandle(hand * Handle, bb BB) { 
+  // Find the dimensions in cell coordinates.
+  l, r, b, t := hash.cellDimensions(bb)
+  n := hash.numcells
+  for  i:=l ;  i<=r; i++ {
+    for j:=b ; j<=t ; j++ {
+      idx := hash_func(HashValue(i), HashValue(j), HashValue(n))
+      bin := hash.table[idx]
+      
+      // Don't add an object twice to the same cell.
+      if bin.containsHandle(hand) { continue; }
+      hand.Retain()
+      
+      // Insert a new bin for the handle in this cell.
+      newBin 		:= hash.getEmptyBin()
+      newBin.handle 	= hand
+      newBin.next 	= bin
+      hash.table[idx] 	= newBin
+    }
+  }
+}
+
+func (hash * SpaceHash) InsertHandle(hand * Handle, obj HashElement, hashid HashValue, bb BB) {
+  // Register the already created handle in the handle set, then hash it.
+  hash.handleSet.Insert(hashid, hand)
+  hash.hashHandle(hand, bb)
+}
+
+func (hash * SpaceHash) Insert(obj SpaceHashElement,  hashid HashValue) {
+  // Wrap the object in a new handle, register it, then hash it into the cells.
+  hand := new(Handle).Init(obj)
+  hand.Retain()
+  hash.handleSet.Insert(hashid, hand)
+  hash.hashHandle(hand, obj.GetBB())
+}
+
+func (hash * SpaceHash) RehashObject(obj SpaceHashElement,  hashid HashValue) {
+  hand := hash.handleSet.Find(hashid, obj).(*Handle)
+  hash.hashHandle(hand, obj.GetBB())
+} 
+
+// Hashset iterator function for rehashing the spatial hash. (hash hash hash hash?)
+func handleRehashHelper(bin, data HashElement) {  
+  var hand * Handle     = bin.(*Handle) 
+  var hash * SpaceHash  = data.(*SpaceHash)   
+  hash.hashHandle(hand, hand.obj.GetBB())
+}
+
+func (hash * SpaceHash) Rehash() {
+  hash.clearHash()  
+  // Rehash all of the handles.
+  hash.handleSet.Each(handleRehashHelper, hash)
+}
+
+func (hash * SpaceHash) Remove(obj HashElement,  hashid HashValue) {
+  hand := hash.handleSet.Remove(hashid, obj).(*Handle)  
+  if hand != nil {
+    hand.obj = nil
+    hand.Release(hash.pooledHandles)
+  }
+}
+
+
+
+type SpaceHashIterator func(a, b HashElement)
+type SpaceHashQueryFunc func(a, b, c HashElement) (bool) 
+
+// Used by the cpSpaceHashEach() iterator.
+type eachPair struct {
+  fun 		SpaceHashIterator
+  data   	HashElement
+}
+
+// Equals function for eachPair
+func (pair *eachPair) Equals(el interface {}) (bool) {  
+  other, ok := el.(*eachPair)  
+  if !ok { return false; }
+  return pair == other
+}
+
+
+// Calls the user iterator function. (Gross I know.)
+func eachHelper(bin , data HashElement) {
+  var hand * Handle     = bin.(*Handle)
+  var pair * eachPair   = data.(*eachPair)
+  pair.fun(hand.obj, pair.data)
+}
+
+// Iterate over the objects in the spatial hash.
+func (hash *SpaceHash) Each(fun SpaceHashIterator, data HashElement) {
+  // Bundle the callback up to send to the hashset iterator.
+  pair := &eachPair{fun, data}  
+  hash.handleSet.Each(eachHelper, pair)
+}
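
A minimal usage sketch for Each; the callback name is hypothetical, and the objects handed to it are whatever values were stored in the hash (they satisfy HashElement):

    printObject := func(obj, data HashElement) {
      fmt.Println("object in hash:", obj)
    }
    hash.Each(printObject, nil)
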
+// Calls the callback function for the objects in a given chain.
+func (hash *SpaceHash) query(bin * SpaceHashBin, obj HashElement, 				
+      fun SpaceHashQueryFunc, data HashElement) {
+  for ; bin != nil ; bin = bin.next {
+    hand 	  := bin.handle
+    other 	:= hand.obj    
+    // Skip this handle if we already tried this pair during this query,
+    // if obj is the same object as other,
+    // or if other has been removed since the last rehash.
+    if hand.stamp == hash.stamp || obj.Equals(other) || other == nil { 
+      continue 
+    } 
+    fun(obj, other, data)
+    // Stamp that the handle was checked already against this object.
+    hand.stamp = hash.stamp
+  }
+}
+
+func (hash * SpaceHash) PointQuery(point Vect, fun SpaceHashQueryFunc, 
+      data HashElement) {      
+  dim := hash.celldim
+  xf  := (point.X/dim).floor_int()
+  yf  := (point.Y/dim).floor_int()
+  hc  := hash.numcells
+  idx := hash_func(HashValue(xf),  HashValue(yf), HashValue(hc))  
+  // Fix by ShiftZ  
+  hash.query(hash.table[idx], &point, fun, data)
+  // Increment the stamp.
+  // Only one cell is checked, but query() requires it anyway.
+  hash.stamp++
+}
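
A sketch of a point query, with illustrative names only. Just the single cell containing the point is inspected, so the callback still has to test each candidate for exact containment:

    onCandidate := func(point, shape, data HashElement) bool {
      fmt.Println("candidate near point:", shape)
      return true
    }
    hash.PointQuery(V(10, 20), onCandidate, nil)
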
+
+func (hash * SpaceHash) SpaceQuery(obj HashElement, bb BB, 
+      fun SpaceHashQueryFunc, data HashElement) {
+  // Get the dimensions in cell coordinates.
+  l, r, b, t := hash.cellDimensions(bb)   
+  n := hash.numcells
+  
+  // Iterate over the cells and query them.
+  for i:=l ; i<=r; i++ {
+    for j := b; j<=t; j++ {
+      idx := hash_func(HashValue(i), HashValue(j), HashValue(n))
+      hash.query(hash.table[idx], obj, fun, data)
+    }
+  }  
+  // Increment the stamp.
+  hash.stamp++
+}
+
+// Similar to struct eachPair above.
+type queryRehashPair struct {
+  hash * SpaceHash
+  fun SpaceHashQueryFunc
+  data HashElement
+} 
+
+func (pair *queryRehashPair) Equals(el interface {}) (bool) {  
+  other, ok := el.(*queryRehashPair)  
+  if !ok { return false; }
+  return pair == other
+}
+
+// Hashset iterator func used with cpSpaceHashQueryRehash().
+func handleQueryRehashHelper(p1, p2 HashElement) {  
+  var hand * Handle           = p1.(*Handle)
+  var pair * queryRehashPair  = p2.(*queryRehashPair)  
+  // Unpack the user callback data.  
+  hash 	:= pair.hash
+  fun  	:= pair.fun
+
+  // dim 	:= hash.celldim
+  n  	:= hash.numcells
+
+  obj 	:= hand.obj
+  bb 	  := obj.GetBB()
+  l, r, b, t := hash.cellDimensions(bb)
+
+  for i := l; i<=r; i++ {
+    for j := b; j<=t; j++ {
+//  // exit the loops if the object has been deleted in func().
+//      if(!hand.obj) goto break_out
+      
+      idx := hash_func(HashValue(i), HashValue(j), HashValue(n))
+      bin := hash.table[idx]
+      
+      if bin.containsHandle(hand) { continue  }      
+      hand.Retain() 
+      // this MUST be done first in case the object is removed in func()
+      hash.query(bin, obj, fun, pair.data)
+      
+      newBin 	       := hash.getEmptyBin()
+      newBin.handle 	= hand
+      newBin.next 	= bin
+      hash.table[idx] 	= newBin
+    }
+  }  
+  //  break_out:
+  // Increment the stamp for each object we hash.
+  hash.stamp++
+}
+
+func (hash * SpaceHash) hashRehash(fun SpaceHashQueryFunc, data HashElement) {
+  hash.clearHash()  
+  pair := &queryRehashPair{hash, fun, data}
+  hash.handleSet.Each(handleQueryRehashHelper, pair)
+}
+
+type SpaceHashSegmentQueryFunc func (obj, other, data HashElement) (Float)
+
+
+func (hash * SpaceHash) segmentQuery(bin *SpaceHashBin , obj HashElement, 
+  fun SpaceHashSegmentQueryFunc, data HashElement) (Float) {
+  
+  t := Float(1.0)
+   
+  for  ; bin != nil ; bin = bin.next {
+    hand := bin.handle
+    other := hand.obj
+    
+    // Skip this handle if we already tried this pair during this query,
+    // or if other has been removed since the last rehash.
+    if hand.stamp == hash.stamp || other == nil { continue; }
+    // Stamp that the handle was checked already against this object.
+    hand.stamp = hash.stamp
+    t = t.Min(fun(obj, other, data))    
+  }
+  return t
+}
+
+// modified from http://playtechs.blogspot.com/2007/03/raytracing-on-grid.html
+func (hash * SpaceHash) SegmentQuery(obj HashElement, a, b Vect, t_exit Float, 
+      fun SpaceHashSegmentQueryFunc, data HashElement) {
+  a = a.Mult(Float(1.0)/hash.celldim)
+  b = b.Mult(Float(1.0)/hash.celldim)
+  
+  dt_dx  := Float(1.0)/((b.X - a.X).Abs())
+  dt_dy  := Float(1.0)/((b.Y - a.Y).Abs())
+  cell_x := (a.X).floor_int() 
+  cell_y := (a.Y).floor_int()
+  t 	   := Float(0.0)
+  
+  var x_inc	, y_inc int
+  var temp_v, temp_h Float
+
+  if b.X > a.X {
+    x_inc  = 1
+    temp_h = (a.X + Float(1.0)).Floor() - a.X
+  } else {
+    x_inc  = -1
+    temp_h = a.X - a.X.Floor()
+  }
+
+  if b.Y > a.Y {
+    y_inc  = 1
+    temp_v = (a.Y + Float(1.0)).Floor() - a.Y
+  } else {
+    y_inc = -1
+    temp_v = a.Y - a.Y.Floor()
+  }
+  
+  // fix NaNs in the horizontal and vertical step values
+  next_h := dt_dx
+  if temp_h != 0.0 { 
+    next_h = temp_h*dt_dx
+  }
+  
+  next_v := dt_dy
+  if temp_v != 0.0 { 
+    next_v = temp_v*dt_dy
+  }
+
+  n := hash.numcells
+  for t < t_exit {
+    idx := hash_func(HashValue(cell_x), HashValue(cell_y), HashValue(n))
+    t_exit = t_exit.Min(hash.segmentQuery(hash.table[idx], obj, fun, data))
+    
+    if (next_v < next_h){
+      cell_y += y_inc
+      t       = next_v
+      next_v += dt_dy
+    } else {
+      cell_x += x_inc
+      t       = next_h
+      next_h += dt_dx
+    }
+  }  
+  hash.stamp++
+}
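
SegmentQuery walks the grid cells crossed by the segment from a to b (a DDA style traversal) and queries each cell it enters, shrinking t_exit to the smallest hit fraction reported so far. A usage sketch with hypothetical names: ray stands for whatever HashElement is being cast, and the callback returns the hit fraction along the segment, or 1.0 for a miss:

    onHit := func(obj, other, data HashElement) Float {
      // compute the first hit fraction of `other` along a->b here
      return Float(1.0)
    }
    hash.SegmentQuery(ray, V(0, 0), V(100, 50), Float(1.0), onHit, nil)
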

+ 194 - 0
src/tamias/spacemap.go

@@ -0,0 +1,194 @@
+package tamias
+import "container/list"
+import "fmt"
+import "exp/iterable"
+
+// Spacemap is an alternative implementation of the spacehash
+
+type SpaceMapElement interface{
+  GetBB() (*BB)  
+}
+
+type SpaceMapKey uint64
+
+type SpaceMapCell struct {
+  Shapes * list.List
+}
+
+type rawSpaceMap map[SpaceMapKey] SpaceMapCell
+
+
+
+type SpaceMap struct {  
+  table map[SpaceMapKey] SpaceMapCell;
+  // Number of cells in the table.
+  numcells int
+  // Dimensions of the cells.
+  cellsize Float
+  // Incremented on every query
+  stamp int
+}
+
+func (cell * SpaceMapCell) Init() (* SpaceMapCell) {
+  cell.Shapes = list.New()
+  return cell
+}
+
+// Find finds a space map element in a cell, or returns nil if not found
+func (cell * SpaceMapCell) Find(el SpaceMapElement) (SpaceMapElement){
+      finder    := func (val interface {}) (bool) { 
+	       testel := val.(SpaceMapElement)
+	       return el == testel
+      }  
+      found := iterable.Find((cell.Shapes), finder)
+      if found == nil { return nil }      
+      return found.(SpaceMapElement)
+}
+
+// Remove removes a space map element from the cell,
+// or returns nil if it was not found.
+func (cell * SpaceMapCell) Remove(el SpaceMapElement) (SpaceMapElement) {
+      var e *list.Element
+      for e = cell.Shapes.Front() ; e != nil ; e = e.Next() {
+        val := e.Value
+        if val.(SpaceMapElement) == el {
+          cell.Shapes.Remove(e)
+          return el
+        }
+      }
+      return nil
+}
+
+
+
+// Insert inserts a space map element into the cell
+func (cell * SpaceMapCell) Insert(el SpaceMapElement) (SpaceMapElement) { 
+      cell.Shapes.PushBack(el)
+      return el
+}
+
+func (cell * SpaceMapCell) String() (string) {
+  return fmt.Sprint("SpaceMap: ", iterable.Data(cell.Shapes))
+}
+
+// SpaceMapAllocate allocates a new spacemap and returns a pointer to it 
+func SpaceMapAllocate() (* SpaceMap) {
+  return &SpaceMap{}
+}
+
+// Init initializes the SpaceMap with the given cell size and amount of cells
+func (sm * SpaceMap) Init(cellsize Float, numcells int) (*SpaceMap) {
+  sm.numcells = numcells
+  sm.cellsize = cellsize
+  sm.table    = make(rawSpaceMap,  sm.numcells)
+  for i := 0; i < sm.numcells ; i++ {
+    _, ok := sm.table[SpaceMapKey(i)]
+    if !ok {
+      cell := SpaceMapCell{}
+      cell.Init()  
+      sm.table[SpaceMapKey(i)] = cell        
+    }
+  }  
+  return sm
+}
+
+func SpaceMapNew(cellsize Float, numcells int) (*SpaceMap) {
+  return new(SpaceMap).Init(cellsize, numcells)
+}
+
+
+// The hash function itself.
+func space_hash(x, y, n uint64) (SpaceMapKey) {
+  return SpaceMapKey((x*1640531513 ^ y*2654435789) % n)
+}
+
+// cellDimensions finds the dimensions of a bounds box in cell coordinates.
+// Returns them in order left, top, right, bottom
+func (sm * SpaceMap) cellDimensions(bb * BB) (int, int, int, int) {  
+  dim 	:= sm.cellsize
+  // Fix by ShiftZ  
+  l := (bb.L / dim).floor_int()  
+  t := (bb.T / dim).floor_int()
+  r := (bb.R / dim).floor_int()
+  b := (bb.B / dim).floor_int()  
+  return l, t, r, b
+}
+
+// Insert inserts an element into the SpaceMap
+func (sm * SpaceMap) Insert(el SpaceMapElement) (SpaceMapElement) {
+  bb := el.GetBB()
+  l, t, r, b := sm.cellDimensions(bb)
+  fmt.Println("Insert!", l, "->", r, " & ", t,"->", b)
+  // the bounds box may be in several cells, so iterate over them
+  for i := l ; i <= r ; i++ {
+    for j := b ; j <= t ; j++ {
+      cell      := sm.FindPoint(i, j)
+      old       := cell.Find(el)      
+      // Already in this spatial cell? Move on to the next one.
+      if old != nil { fmt.Println("Skip!", i, j); continue }
+      // Not there yet: add it to this cell.
+      fmt.Println("Inserting element: ", i, j)
+      cell.Insert(el)
+    }
+  }
+  return el
+}
+
+// FindPoint looks up the SpaceMapCell that the point with cell
+// coordinates x and y falls in.
+func (sm * SpaceMap) FindPoint(x, y int) (*SpaceMapCell) {
+  xx := uint64(x)
+  yy := uint64(y)
+  hk := space_hash(xx, yy, uint64(sm.numcells))
+  fmt.Println("hk: ", hk)
+  cell := sm.table[hk]
+  return &cell
+}
+
+// Looks up the SpaceMapCell that the vector is in
+func (sm * SpaceMap) FindVect(p Vect) (*SpaceMapCell) {
+  return sm.FindPoint( int(p.X.Floor()), int(p.Y.Floor()) )
+}
+
+// FindBB returns a list with all SpaceMapElements in the given bounds box,
+// or nil if nothing was found.
+func (sm * SpaceMap) FindBB(bb *BB) (* list.List) {
+  result := list.New()
+  num    := 0
+  l, t, r, b := sm.cellDimensions(bb)
+  // the bounds box may be in several cells, so iterate over them
+  for i := l ; i <= r ; i++ {
+    for j := b ; j <= t ; j++ {
+      cell      := sm.FindPoint(i, j)
+      for found := range cell.Shapes.Iter() { 
+        result.PushBack(found)
+        num++ 
+      }
+    }
+  }
+  if num < 1 { return nil }
+  return result
+}
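
A round-trip sketch of the SpaceMap API as exercised by tamias_test.go later in this commit; shape stands for any value implementing SpaceMapElement:

    sm := SpaceMapNew(10.0, 25)      // 10.0 unit cells, 25 buckets
    sm.Insert(shape)
    hits := sm.FindBB(shape.GetBB()) // *list.List of candidates, or nil
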
+
+// RehashObject moves the object to the right hash bucket. 
+// Call this after it has moved. 
+func (sm * SpaceMap) RehashObject(el SpaceMapElement) {
+  // TODO: not implemented yet.
+}
+
+
+
+func (sm * SpaceMap) String() (string) {
+  return fmt.Sprint("SpaceMap: ", sm.table )
+}
+
+
+
+
+
+
+
+
+

+ 30 - 0
src/tamias/tamias.go

@@ -0,0 +1,30 @@
+package tamias
+
+
+import "os"
+import "fmt"
+
+func Fatal(message string) {
+  fmt.Fprintln(os.Stderr, "Assertion failed: ", message);
+  os.Exit(1)
+}
+
+func Assert(cond bool, err string)  {
+  if cond { 
+    return 
+  }
+  Fatal(err) 
+}
+
+
+
+
+
+
+
+
+
+
+
+
+

+ 96 - 0
src/tamias/tamias_test.go

@@ -0,0 +1,96 @@
+package tamias
+
+import "testing"
+
+
+func assert(test * testing.T, cond bool, err string, args ... interface{})  {
+  if cond {
+    return
+  }
+  test.Errorf(err, args...) 
+}
+
+
+func TestFloat(t * testing.T) {
+  f1 := Float(10.0)
+  f2 := Float(20.0)
+  assert(t, f1 == 10.0, "Float should equal its initial value 10.0", f1)
+  assert(t, f1.Equals(10.0), "Float should equal its initial value 10.0", f1)
+  assert(t, f1.Min(f2) == f1, "Min must work properly.", f1)
+  assert(t, f1.Max(f2) == f2, "Max must work properly.", f1)
+  assert(t, f2.Min(f1) == f1, "Min must work properly in reverse.", f1)
+  assert(t, f2.Max(f1) == f2, "Max must work properly in reverse.", f1)  
+}
+
+func TestBB(t * testing.T) {
+  bb := BBMake(10.0, 20.0, 40.0, 80.0)  
+  // assert(bb != nil , "Bounds Box must be constructable")
+  assert(t, bb.L == 10.0, "Bounds Box L must be 10.0", bb.L)
+  assert(t, bb.T == 20.0, "Bounds Box T must be 20.0", bb.T)
+  assert(t, bb.R == 40.0, "Bounds Box R must be 40.0", bb.R)
+  assert(t, bb.B == 80.0, "Bounds Box B must be 80.0", bb.B)
+  b2 := bb.Grow(10.0)
+  assert(t, b2.Contains(bb), "Contains and grow work correctly.", bb, b2)
+}
+
+
+func TestShape(t * testing.T) {
+  body := BodyNew(10.0, 0.0)
+  box  := BoxShapeNew(body, 20.0, 30.0)  
+  box.CacheBB(body.Pos(), body.Rot())
+  assert(t, box.GetBB() != nil, "Box must have a bounds box")
+  if box.GetBB() != nil { 
+    // the object should have been placed at (0,0), so half the BB
+    // is positive and half negative
+    // chipmunk, and hence Tamias too, use a normal Cartesian 
+    // coordinate system where zero is in the bottom left 
+    assert(t, box.Shape.BB == box.GetBB(), "BB and GetBB() are the same")  
+    assert(t, box.GetBB().L == -10.0, "Box must have BB.L -10.0", box.GetBB().L)
+    assert(t, box.GetBB().T == 15.0, "Box must have BB.T 15.0", box.GetBB().T)
+    assert(t, box.GetBB().R == 10.0, "Box must have BB.R 10.0", box.GetBB().R)
+    assert(t, box.GetBB().B == -15.0, "Box must have BB.B -15.0", box.GetBB().B)
+  }  
+}
+
+func TestVect(t * testing.T) {  
+  v1 := VF(3.0, 4.0)
+  v2 := V(1.0, 0.0)
+  // tamias.V(3.0, 4.0)
+  assert(t, v1.X == 3.0, "v1.X should be 3.0", v1.X)
+  assert(t, v1.Y == 4.0, "v1.Y should be 4.0", v1.Y)
+  assert(t, v2.X == 1.0, "v1.X should be 1.0", v2.X)
+  assert(t, v2.Y == 0.0, "v1.Y should be 0.0", v2.Y)
+  assert(t, v1.Length() == 5.0, "Vector length should be 5.")
+  assert(t, v1.Equals(v1), "Vector should be equal to itself.")
+  assert(t, v1.Add(v2).X == 4.0, "Vector Sum X should be 4.")
+  assert(t, v1.Add(v2).Y == 4.0, "Vector Sum Y should be 4.")
+  vm := v1.Mult(Float(2.0))
+  assert(t, vm.Y == 8.0, "Vector Mult Y should be 8.0.", vm.X, vm.Y)
+  
+  
+}
+
+func TestSpaceMap(t * testing.T) {  
+  sm    := SpaceMapNew(10.0, 25)
+  body  := BodyNew(10.0, 0.0)
+  box   := BoxShapeNew(body, 20.0, 30.0)
+  assert(t, sm != nil, "SpaceMap should be constructable")
+  sm.Insert(box)
+  bb    := box.GetBB().Grow(10.0)
+  found := sm.FindBB(&bb)
+  assert(t, found != nil, "SpaceMap should find back inserted items.", bb)
+  if found != nil { 
+    block := func(el interface {})(bool) {
+      fmt.Println((el.(*PolyShape)))
+      return el.(*PolyShape) == box
+    } 
+    res  := iterable.Find(found, block)
+    assert(t, res != nil, "SpaceMap should find back the *right* inserted items.", found)
+  } else {
+    t.Errorf("SpaceMap could not find insertd item. (%s)\n", sm.String()) 
+    // fmt.Printf()
+  }  
+   
+  
+}
+

+ 83 - 0
src/tamias/util.go

@@ -0,0 +1,83 @@
+package tamias
+
+
+/*
+func J_MAX(constraint Constraint, dt Float) (Vect) {
+  return Constraint.maxForce * dt
+} 
+*/
+
+func RelativeVelocity(a, b *Body, r1, r2 Vect) (Vect){
+	v1_sum := a.v.Add(r1.Perp().Mult(a.w))
+	v2_sum := b.v.Add(r2.Perp().Mult(b.w))
+	return v2_sum.Sub(v1_sum)
+}
+
+func NormalRelativeVelocity(a, b *Body, r1, r2, n  Vect) (Float) {
+	return RelativeVelocity(a, b, r1, r2).Dot(n);
+}
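
For reference, with body velocities v, angular velocities w and contact offsets r1 and r2, the two helpers above compute

    v_{rel} = (v_b + \omega_b \times r_2) - (v_a + \omega_a \times r_1), \qquad v_n = v_{rel} \cdot n

where the Perp().Mult(w) construction is the 2D form of the cross product \omega \times r.
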
+
+func ApplyImpulses (a, b *Body, r1, r2, j Vect) {
+	a.ApplyImpulse(j.Neg()	, r1)
+	b.ApplyImpulse(j	, r2)
+}
+
+func ApplyBiasImpulses(a, b *Body, r1, r2, j Vect) {
+	a.ApplyBiasImpulse(j.Neg()	, r1)
+	b.ApplyBiasImpulse(j		, r2)
+}
+
+func KScalar(a, b *Body, r1, r2, n Vect) (Float) {
+	mass_sum := a.m_inv + b.m_inv;
+	r1cn 	 := r1.Cross(n) 
+	r2cn 	 := r2.Cross(n) 
+	value    := mass_sum + a.i_inv*r1cn*r1cn + b.i_inv*r2cn*r2cn
+	Assert(value != 0.0, "Unsolvable collision or constraint.")	
+	return value
+}
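
KScalar is the scalar effective mass seen by an impulse applied along n at the contact offsets; written out, the value computed above is

    k_n = m_a^{-1} + m_b^{-1} + I_a^{-1} (r_1 \times n)^2 + I_b^{-1} (r_2 \times n)^2
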
+
+func KTensor(a, b *Body, r1, r2 Vect) (k1, k2 Vect) { 
+	// calculate mass matrix
+	// If I wasn't lazy and wrote a proper matrix class, this wouldn't be so gross...	
+	m_sum := a.m_inv + b.m_inv
+	
+	// start with I*m_sum
+	k11 := m_sum	      ; k12 := Float(0.0)
+	k21 := Float(0.0)    ;	k22 := m_sum
+	
+	// add the influence from r1
+	a_i_inv := a.i_inv;
+	r1xsq 	:=  r1.X * r1.X * a_i_inv
+	r1ysq 	:=  r1.Y * r1.Y * a_i_inv
+	r1nxy 	:= -r1.X * r1.Y * a_i_inv
+	k11 += r1ysq; k12 += r1nxy
+	k21 += r1nxy; k22 += r1xsq
+	
+	// add the influence from r2
+	b_i_inv := b.i_inv;
+	r2xsq 	:= r2.X * r2.X * b_i_inv
+	r2ysq 	:= r2.Y * r2.Y * b_i_inv
+	r2nxy 	:= -r2.X * r2.Y * b_i_inv
+	k11 += r2ysq; k12 += r2nxy
+	k21 += r2nxy; k22 += r2xsq
+	
+	// invert
+	determinant := k11*k22 - k12*k21
+	Assert(determinant != 0.0, "Unsolvable constraint.")
+	
+	det_inv := 1.0 / determinant;
+	k1 	= V( k22*det_inv, -k12*det_inv)
+	k2 	= V(-k21*det_inv,  k11*det_inv)
+	return k1, k2
+}
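
KTensor assembles the 2x2 effective mass matrix and returns the rows of its inverse; in matrix form

    K = (m_a^{-1} + m_b^{-1}) I_2
        + I_a^{-1} \begin{pmatrix} r_{1y}^2 & -r_{1x} r_{1y} \\ -r_{1x} r_{1y} & r_{1x}^2 \end{pmatrix}
        + I_b^{-1} \begin{pmatrix} r_{2y}^2 & -r_{2x} r_{2y} \\ -r_{2x} r_{2y} & r_{2x}^2 \end{pmatrix}

with k1 and k2 being the first and second rows of K^{-1}.
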
+
+func (vr Vect) MultK(k1, k2 Vect) (Vect) {
+  return V(vr.Dot(k1), vr.Dot(k2));
+}
+
+
+/*
+#define CP_DefineClassGetter(t) const cpConstraintClass * t##GetClass(){return (cpConstraintClass *)&klass;}
+
+#define J_MAX(constraint, dt) (((cpConstraint *)constraint)->maxForce*(dt))
+*/

+ 190 - 0
src/tamias/vect.go

@@ -0,0 +1,190 @@
+/* Copyright (c) 2010 Beoran
+ * Copyright (c) 2007 Scott Lembcke
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package tamias
+
+import "fmt"
+
+// import "math"
+
+// type Float float;
+
+
+type Vect struct {
+  X Float
+  Y Float
+}
+
+var VZERO=Vect{0.0,0.0};
+
+func V(x Float, y Float) (Vect) {
+  result := Vect{X:x, Y:y}
+  return result
+}
+
+func VF(x float, y float) (Vect) {
+  return V(Float(x), Float(y))
+}
+
+func (v1 Vect) Equals(o interface {}) (bool) {
+  v2, ok := o.(Vect)
+  if !ok { return false; }   
+  return (v1.X == v2.X) && (v1.Y == v2.Y) 
+}
+
+// non-inlined functions
+/*
+cpFloat cpvlength(const cpVect v);
+cpVect cpvslerp(const cpVect v1, const cpVect v2, const cpFloat t);
+cpVect cpvslerpconst(const cpVect v1, const cpVect v2, const cpFloat a);
+cpVect cpvforangle(const cpFloat a); // convert radians to a normalized vector
+cpFloat cpvtoangle(const cpVect v); // convert a vector to radians
+char *cpvstr(const cpVect v); // get a string representation of a vector
+*/
+
+func (v1 Vect) Add(v2 Vect) (Vect) {
+  return V(v1.X + v2.X, v1.Y + v2.Y)
+}
+
+func (v Vect) Neg() (Vect) {
+  return V(-v.X, -v.Y)
+}
+
+func (v1 Vect) Sub(v2 Vect) (Vect) {
+  return V(v1.X - v2.X, v1.Y - v2.Y)
+}
+
+func (v1 Vect) Mult(s Float) (Vect) {
+  return V(v1.X * s, v1.Y * s)
+}
+
+func (v1 Vect) Dot(v2 Vect) (Float) {
+  return v1.X * v2.X + v1.Y * v2.Y
+}
+
+func (v1 Vect) Cross(v2 Vect) (Float) {
+  return v1.X * v2.Y - v1.Y * v2.X
+}
+
+func (v Vect) Perp() (Vect) {
+  return V(-v.Y, v.X)
+}
+
+func (v Vect) Rperp() (Vect) {
+  return V(v.Y, -v.X)
+}
+
+func (v1 Vect) Project(v2 Vect) (Vect) {
+  return v2.Mult(v1.Dot(v2) / v2.Dot(v2))
+}
+
+func (v1 Vect) Rotate(v2 Vect) (Vect) {
+  return V(v1.X*v2.X - v1.Y*v2.Y, v1.X*v2.Y + v1.Y*v2.X)
+}
+
+func (v1 Vect) Unrotate(v2 Vect) (Vect) {
+  return V(v1.X*v2.X + v1.Y*v2.Y, v1.Y*v2.X - v1.X*v2.Y)
+}
+
+func (v Vect) Lengthsq() (Float) {
+  return v.Dot(v)
+}
+
+func (v Vect) Length() (Float) {
+  return v.Lengthsq().Sqrt()
+}
+
+
+func (v1 Vect) Lerp(v2 Vect, t Float) (Vect) {
+  aid1 := v1.Mult(1.0 - t)
+  aid2 := v2.Mult(t)
+  return aid1.Add(aid2)
+}
+
+func (v Vect) Normalize() (Vect) {
+  return v.Mult(1.0 / v.Length())
+}
+
+func (v Vect) NormalizeSafe() (Vect) {
+  if v.X == 0.0 && v.Y == 0.0 { 
+    return VZERO 
+  } 
+  return v.Normalize()
+}
+
+
+func (v Vect) Clamp(len Float) (Vect) {
+  if  v.Lengthsq() > (len * len) {
+    return v.Normalize().Mult(len)
+  } 
+  return v
+}
+
+func (v1 Vect) Lerpconst(v2 Vect, d Float) (Vect) {
+  return v1.Add(v2.Sub(v1).Clamp(d));
+}
+
+func (v1 Vect) Dist(v2 Vect) (Float) {
+  return v1.Sub(v2).Length()
+}
+
+func (v1 Vect) Distsq(v2 Vect) (Float) {
+  return v1.Sub(v2).Lengthsq()
+}
+
+
+func (v1 Vect) Near(v2 Vect, dist Float) (bool) {
+  return v1.Distsq(v2) < dist*dist
+}
+
+
+func (v1 Vect) Slerp(v2 Vect, t Float) (Vect) {
+  omega := v1.Dot(v2).Acos();
+  if(omega != 0.0){
+    denom := 1.0 / omega.Sin();
+    par1  := ((1.0 - t)*omega).Sin() *denom
+    par2  := ((t)*omega).Sin() *denom
+    aid1  := v1.Mult(par1)
+    aid2  := v2.Mult(par2)
+    return aid1.Add(aid2) 
+  }     
+  // else
+  return v1;
+}
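
Slerp is the standard spherical linear interpolation; for unit-length v1 and v2, with \omega = \arccos(v_1 \cdot v_2), it evaluates

    \mathrm{slerp}(v_1, v_2, t) = \frac{\sin((1-t)\omega)}{\sin\omega} v_1 + \frac{\sin(t\omega)}{\sin\omega} v_2

and when \omega is zero the code simply returns v1.
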
+
+func (v1 Vect) Slerpconst(v2 Vect, a Float) (Vect) {
+  angle := v1.Dot(v2).Acos()
+  return v1.Slerp(v2, a.Min(angle) / angle)
+}
+
+func ForAngle(a Float) (Vect)  {
+  return V(a.Cos(), a.Sin());
+}
+
+func (v Vect) ToAngle() (Float) {
+  return v.Y.Atan2(v.X)
+}
+
+func (v Vect) String() (string) {
+  return fmt.Sprintf("(%.3f, %.3f)", v.X, v.Y)  
+}
+