
[BROKEN] Turn on a lamp with a gesture – IR Cam Image Processing!


Make has a write-up on wand control using a simple reflective wand.

But… I just worked through it… it’s *way* broken.  The git pull brings down the source with inconsistent tabs and spaces.  Also, it’s specific to Python 2, so the print statements all have to be replaced.
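For example, every print statement has to become a Python 3 call – this one is lifted straight from the script below:

# Python 2, as pulled – a SyntaxError under Python 3:
print "Initializing point tracking"

# Python 3 equivalent:
print("Initializing point tracking")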

 

For now – turn back.

 

Check it out – the build is overly complicated, but it’s a whole package 😉 :

http://makezine.com/projects/raspberry-pi-potter-wand/

The pearl stickers were hard for me to find, so I ordered retroreflective tape instead:

Retroreflective Tape

How this technique works…

This technique uses image processing to track the wand’s position through a series of frames taken on the camera.  It first has to find the wand within the view; once it has identified the wand light, it uses a function in the OpenCV package to track its movement:

calcOpticalFlowPyrLK:  Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids.
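In isolation, tracking a point from one frame to the next looks like this (a minimal, self-contained sketch using two synthetic frames – the real script feeds it live camera frames):

import cv2
import numpy as np

# two synthetic grayscale frames: a bright dot that shifts 4 px to the right
prev_gray = np.zeros((480, 640), dtype=np.uint8)
next_gray = np.zeros((480, 640), dtype=np.uint8)
cv2.circle(prev_gray, (100, 100), 5, 255, -1)
cv2.circle(next_gray, (104, 100), 5, 255, -1)

# starting point(s) to follow, shaped (N, 1, 2), dtype float32
p0 = np.array([[[100.0, 100.0]]], dtype=np.float32)

lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None, **lk_params)
print(p1[st == 1])   # tracked position in the new frame, roughly (104, 100)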

This provides points from the image sequence that can be matched against the gesture “shapes”.  The shape check simply takes two line segments and identifies each as a move up, left, down, or right.  The combination of any two creates a recognizable request.
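To make that concrete, here’s the pairing idea in a few lines (a sketch of my own – the names are hypothetical, but the thresholds mirror the script’s IsGesture checks below):

def direction(a, b, c, d):
    # classify the segment from old point (c, d) to new point (a, b);
    # image coordinates, so y grows downward
    if a < c - 5 and abs(b - d) < 1:
        return "left"
    if c < a - 5 and abs(b - d) < 1:
        return "right"
    if b < d - 5 and abs(a - c) < 5:
        return "up"
    if d < b - 5 and abs(a - c) < 5:
        return "down"
    return ""

spells = {"rightup": "Lumos", "rightdown": "Nox", "leftdown": "Colovaria"}

# two consecutive segments combine into a single recognizable request
moves = direction(40, 50, 10, 50) + direction(40, 20, 40, 50)   # right, then up
print(spells.get(moves, "no spell"))   # -> Lumos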

It’s really quite brilliant in its simplicity.

The code for the image recognition is found here:

https://github.com/sean-obrien/rpotter/

And it’s wonderfully tiny.  Here it is with the indentation normalized and the prints converted for Python 3:

#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
  _\
  \
O O-O
 O O
  O
  
Raspberry Potter
Version 0.1.1

Use your own wand or your interactive Harry Potter wands to control the IoT.  


Copyright (c) 2015 Sean O'Brien.  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import io
import cv2
import picamera
import numpy as np
import threading
import sys
import math
import time
import pigpio

GPIOS=32
MODES=["INPUT", "OUTPUT", "ALT5", "ALT4", "ALT0", "ALT1", "ALT2", "ALT3"]

pi = pigpio.pi()

#pin for Powerswitch (Lumos,Nox)
switch_pin = 16
pi.set_mode(switch_pin,pigpio.OUTPUT)

#pin for Trinket (Colovario)
trinket_pin = 12
pi.set_mode(trinket_pin,pigpio.OUTPUT)

print "Initializing point tracking"


# Parameters
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
blur_params = (9,9)
dilation_params = (5, 5)
movement_threshold = 80

# start capturing
cv2.namedWindow("Raspberry Potter")
cam = cv2.VideoCapture(0)
cam.set(3, 640)
cam.set(4, 480)


def Spell(spell):
    global ig
    # clear all gesture checks so a cast is not immediately re-triggered
    ig = [[0] for x in range(15)]
    # Invoke IoT (or any other) actions here
    cv2.putText(mask, spell, (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0))
    if spell == "Colovaria":
        print("GPIO trinket")
        pi.write(trinket_pin, 0)
        time.sleep(1)
        pi.write(trinket_pin, 1)
    elif spell == "Lumos":
        print("GPIO ON")
        pi.write(switch_pin, 1)
    elif spell == "Nox":
        print("GPIO OFF")
        pi.write(switch_pin, 0)
    print("CAST: %s" % spell)

def IsGesture(a, b, c, d, i):
    print("point: %s" % i)
    # record basic movements - TODO: trained gestures
    if a < (c - 5) and abs(b - d) < 1:
        ig[i].append("left")
    elif c < (a - 5) and abs(b - d) < 1:
        ig[i].append("right")
    elif b < (d - 5) and abs(a - c) < 5:
        ig[i].append("up")
    elif d < (b - 5) and abs(a - c) < 5:
        ig[i].append("down")
    # check for gesture patterns in this point's movement history
    astr = ''.join(map(str, ig[i]))
    if "rightup" in astr:
        Spell("Lumos")
    elif "rightdown" in astr:
        Spell("Nox")
    elif "leftdown" in astr:
        Spell("Colovaria")
    print(astr)
    
def FindWand():
    global rval, old_frame, old_gray, p0, mask, color, ig, img, frame
    try:
        rval, old_frame = cam.read()
        cv2.flip(old_frame, 1, old_frame)
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

        # TODO: trained image recognition
        # look for small bright circles (the reflected wand tip)
        p0 = cv2.HoughCircles(old_gray, cv2.HOUGH_GRADIENT, 3, 100,
                              param1=100, param2=30, minRadius=4, maxRadius=15)
        p0.shape = (p0.shape[1], 1, p0.shape[2])
        p0 = p0[:, :, 0:2]
        mask = np.zeros_like(old_frame)
        ig = [[0] for x in range(20)]
        print("finding...")
        threading.Timer(3, FindWand).start()
    except:
        e = sys.exc_info()[1]
        print("Error: %s" % e)
        cam.release()
        cv2.destroyAllWindows()
        sys.exit()
        
def TrackWand():
    global rval, old_frame, old_gray, p0, mask, color, ig, img, frame
    color = (0, 0, 255)
    rval, old_frame = cam.read()
    cv2.flip(old_frame, 1, old_frame)
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

    # Take first frame and find circles in it
    p0 = cv2.HoughCircles(old_gray, cv2.HOUGH_GRADIENT, 3, 100,
                          param1=100, param2=30, minRadius=4, maxRadius=15)
    try:
        p0.shape = (p0.shape[1], 1, p0.shape[2])
        p0 = p0[:, :, 0:2]
    except:
        print("No points found")
    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    while True:
        rval, frame = cam.read()
        cv2.flip(frame, 1, frame)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        try:
            # calculate optical flow from the previous frame to this one
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]

            # draw the tracks
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                # only try to detect gesture on highly-rated points (below 15)
                if i < 15:
                    IsGesture(a, b, c, d, i)
                dist = math.hypot(a - c, b - d)
                if dist < movement_threshold:
                    cv2.line(mask, (int(a), int(b)), (int(c), int(d)), (0, 255, 0), 2)
                cv2.circle(frame, (int(a), int(b)), 5, color, -1)
                cv2.putText(frame, str(i), (int(a), int(b)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
        except IndexError:
            print("Index error")
            cam.release()
            cv2.destroyAllWindows()
            break
        except:
            e = sys.exc_info()[0]
            print("Error: %s" % e)
            cam.release()
            cv2.destroyAllWindows()
            break
        img = cv2.add(frame, mask)

        cv2.putText(img, "Press ESC to close.", (5, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
        cv2.imshow("Raspberry Potter", frame)

        # get next frame
        rval, frame = cam.read()

        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
        key = cv2.waitKey(20)
        if key in [27, ord('Q'), ord('q')]:  # exit on ESC or Q
            break
            
FindWand()
TrackWand()            
cv2.destroyAllWindows()
cam.release()

Make it simpler…

You could remove the Particle board and combine the recognition with one of these techniques (a minimal GPIO sketch follows the list):

Raspberry PI Controlling Power through GPIO (no wifi needed)

Raspberry PI for Controlling Wemo Power

Raspberry PI for controlling TP-Link POWER
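For the no-wifi GPIO route, the control side is tiny (a minimal sketch, assuming a relay or PowerSwitch Tail wired to GPIO 16 – the same pin the script drives – and the pigpiod daemon running):

import time
import pigpio

SWITCH_PIN = 16                        # same pin rpotter.py uses for Lumos/Nox

pi = pigpio.pi()                       # connect to the local pigpiod daemon
pi.set_mode(SWITCH_PIN, pigpio.OUTPUT)

pi.write(SWITCH_PIN, 1)                # Lumos: lamp on
time.sleep(5)
pi.write(SWITCH_PIN, 0)                # Nox: lamp off
pi.stop()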

I’ll let you know…

I’ll re-create it as soon as my NoIR cam comes in:

IR Camera

If you have a little more money and less time, the build where the smarts are in the wand can be found here:

Raspberry Pi – Control by Magic Wand!
