-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathsonic-track.py
454 lines (400 loc) · 18.6 KB
/
sonic-track.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
#!/usr/bin/env python3
progVer = "ver 0.91"
import os
mypath = os.path.abspath(__file__)  # Find the full path of this python script
baseDir = mypath[0:mypath.rfind("/")+1]  # get the path location only (excluding script name)
baseFileName = mypath[mypath.rfind("/")+1:mypath.rfind(".")]
progName = os.path.basename(__file__)
print("%s %s using sonic-pi, web or pi-camera, python3 and OpenCV" % (progName, progVer))
print("Loading Please Wait ....")
# Check for config variable file to import and try to download it if not found.
configFilePath = baseDir + "config.py"
if not os.path.exists(configFilePath):
    print("ERROR - Missing config.py file - Could not find Configuration file %s" % (configFilePath))
    # Bug fix: urllib2 is Python 2 only and raises ImportError under the
    # python3 shebang above; urllib.request is the Python 3 equivalent.
    import urllib.request
    config_url = "https://raw.github.com/pageauc/sound-track/master/config.py"
    print(" Attempting to Download config.py file from %s" % ( config_url ))
    try:
        wgetfile = urllib.request.urlopen(config_url)
    except OSError:  # URLError/HTTPError are OSError subclasses (was a bare except)
        print("ERROR - Download of config.py Failed")
        print(" Try Rerunning the sound-track-install.sh Again.")
        print(" or")
        print(" Perform GitHub curl install per Readme.md")
        print(" and Try Again")
        print("Exiting %s" % ( progName ))
        quit()
    # NOTE(review): file is written to the current working directory, not to
    # configFilePath/baseDir -- behavior kept as-is; confirm this is intended.
    with open('config.py', 'wb') as f:  # context manager guarantees close
        f.write(wgetfile.read())
# Read Configuration variables from config.py file
from config import *
# import the necessary packages
import io
import time
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
from psonic import *
# Calculated Variables Should not need changing by user
####
# See if Web Cam is selected
if WEBCAM:
    # Use the configured web cam frame size instead of the pi-camera size
    CAMERA_WIDTH = WEBCAM_WIDTH
    CAMERA_HEIGHT = WEBCAM_HEIGHT
# Increase size of openCV display window
big_w = int(CAMERA_WIDTH * windowBigger)
big_h = int(CAMERA_HEIGHT * windowBigger)
# initialize hotspot area variables
menuTimeout = 2.0  # seconds a hot-spot menu stays locked (debounced) after activation
synthHotxy = (int(CAMERA_WIDTH/synthHotSize),int(CAMERA_HEIGHT/synthHotSize))
# split screen into horz and vert zones for note changes
octaveHotxy = (int(CAMERA_WIDTH/octaveHotSize),int(CAMERA_HEIGHT/octaveHotSize))
octaveStart = octavePicks[0]
notesTotal = len(octaveList[octaveStart][1])  # number of notes in the starting octave
notesHorizZone = int(CAMERA_WIDTH / (notesTotal - 1)) # Calculate Zone Area index
notesVertZone = int(CAMERA_HEIGHT /(notesTotal - 1))
drumHotxy = (int(CAMERA_WIDTH/drumHotSize),int(CAMERA_HEIGHT/drumHotSize)) #Not implemented
drumsTotal = len(drumPicks)
drumHorizZone = int(CAMERA_WIDTH / (drumsTotal - 1)) # Calculate Zone Area index
drumVertZone = int(CAMERA_HEIGHT /(drumsTotal - 1))
noteSleepMin = float(noteSleepMin) # make sure noteSleepMin is a float
# Color data for OpenCV lines and text (BGR tuples)
cvBlue = (255,0,0)
cvGreen = (0,255,0)
cvRed = (0,0,255)
FONT_SCALE = .3 # OpenCV window text font size scaling factor default=.5 (lower is smaller)
# These OpenCV Threshold Settings should not have to changed
THRESHOLD_SENSITIVITY = 25
BLUR_SIZE = 10
# These Three functions are optional thread loops
# that can be edited and activated/deactivated from config.py
#----------------------------------------------------------------------------------------------
def drumBass(): # Edit this optional thread loop see config.py drumBassOn variable
    """
    Optional background thread loop: play a random note from an E3 MAJOR7
    chord on the PROPHET synth every 0.5 seconds, forever.
    Enabled by the drumBassOn variable in config.py (started by sonicTrack).
    """
    # Explicit import: this file never imports random itself and previously
    # relied on psonic's star export providing the name -- fragile.
    import random
    c = chord(E3, MAJOR7)
    while True:
        use_synth(PROPHET)
        play(random.choice(c), release=0.6)
        sleep(0.5)
#-----------------------------------------------------------------------------------------------
def drumKick(): # Edit this optional thread loop see config.py drumKickOn Variable
    """Optional background thread loop: play a heavy kick drum sample once per second."""
    keep_playing = True
    while keep_playing:
        sample(DRUM_HEAVY_KICK)
        sleep(1)
#-----------------------------------------------------------------------------------------------
def drumSnare(): # Edit this optional thread loop see config.py drumSnareOn Variable
    """Optional background thread loop: play a hard snare drum sample once per second."""
    keep_playing = True
    while keep_playing:
        sample(DRUM_SNARE_HARD)
        sleep(1)
#-----------------------------------------------------------------------------------------------
class PiVideoStream:
    """
    Threaded Raspberry Pi camera frame grabber.

    Captures BGR frames continuously on a daemon thread so the main loop can
    fetch the latest frame via read() without blocking on the camera.
    """
    def __init__(self, resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=CAMERA_FRAMERATE, rotation=0, hflip=False, vflip=False):
        # Configure the Pi camera hardware per the requested settings.
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.rotation = rotation
        self.camera.framerate = framerate
        self.camera.hflip = hflip
        self.camera.vflip = vflip
        # Continuous BGR capture feeding a reusable raw array buffer.
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(
            self.rawCapture, format="bgr", use_video_port=True)
        self.frame = None     # most recent frame grabbed by update()
        self.stopped = False  # set True by stop() to shut the thread down
    def start(self):
        """Launch the daemon capture thread and return self for chaining."""
        capture_thread = Thread(target=self.update, args=())
        capture_thread.daemon = True
        capture_thread.start()
        return self
    def update(self):
        """Grab frames until stop() is requested, then release camera resources."""
        for raw_frame in self.stream:
            self.frame = raw_frame.array
            # Reset the buffer so it can receive the next frame.
            self.rawCapture.truncate(0)
            if not self.stopped:
                continue
            # Stop requested: release everything before the thread exits.
            self.stream.close()
            self.rawCapture.close()
            self.camera.close()
            return
    def read(self):
        """Return the most recently captured frame (None until first capture)."""
        return self.frame
    def stop(self):
        """Signal the capture thread to terminate and release the camera."""
        self.stopped = True
#-----------------------------------------------------------------------------------------------
class WebcamVideoStream:
    """
    Threaded USB web camera frame grabber.

    Reads frames continuously on a daemon thread so the main loop can fetch
    the latest frame via read() without blocking on the camera.
    """
    def __init__(self, CAM_SRC=WEBCAM_SRC, CAM_WIDTH=WEBCAM_WIDTH, CAM_HEIGHT=WEBCAM_HEIGHT):
        # initialize the video camera stream and read the first frame
        # from the stream.  (Removed the dead assignment self.stream = CAM_SRC
        # that was immediately overwritten by the VideoCapture object.)
        self.stream = cv2.VideoCapture(CAM_SRC)
        # Named property ids replace the former magic numbers 3 and 4
        # (CAP_PROP_FRAME_WIDTH == 3, CAP_PROP_FRAME_HEIGHT == 4).
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_WIDTH)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_HEIGHT)
        (self.grabbed, self.frame) = self.stream.read()
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
    def start(self):
        """Launch the daemon frame-reading thread and return self for chaining."""
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        """Keep reading frames until stop() is called."""
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return
            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
    def read(self):
        """Return the most recently read frame."""
        return self.frame
    def stop(self):
        """Signal the thread to stop and release the capture device."""
        self.stopped = True
        self.stream.release()
#-----------------------------------------------------------------------------------------------
def trackPoint(grayimage1, grayimage2):
    """
    Find the largest area of motion between two grayscale frames.

    grayimage1, grayimage2 - consecutive grayscale frames (same size)

    Returns [cx, cy, w, h] -- center point and bounding box size of the
    biggest motion contour with area above MIN_AREA -- or [] if no
    qualifying motion was found.
    """
    moveData = [] # initialize list of movementCenterPoints
    biggestArea = MIN_AREA
    # Get differences between the two greyed images
    differenceImage = cv2.absdiff( grayimage1, grayimage2 )
    # Blur difference image to enhance motion vectors
    differenceImage = cv2.blur( differenceImage,(BLUR_SIZE,BLUR_SIZE ))
    # Get threshold of blurred difference image based on THRESHOLD_SENSITIVITY variable
    retval, thresholdImage = cv2.threshold( differenceImage, THRESHOLD_SENSITIVITY, 255, cv2.THRESH_BINARY )
    # OpenCV 3.x returns (image, contours, hierarchy); OpenCV 2.x/4.x return
    # (contours, hierarchy).  Narrowed from a bare except: only the tuple
    # unpacking mismatch (ValueError) is the expected failure here.
    try:
        thresholdImage, contours, hierarchy = cv2.findContours( thresholdImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
    except ValueError:
        contours, hierarchy = cv2.findContours( thresholdImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
    # Truthiness covers both the list (4.x) and tuple (3.x) return types,
    # unlike the previous "contours != ()" tuple comparison.
    if contours:
        for c in contours:
            cArea = cv2.contourArea(c)
            if cArea > biggestArea:
                biggestArea = cArea
                ( x, y, w, h ) = cv2.boundingRect(c)
                cx = int(x + w/2) # x center point of contour
                cy = int(y + h/2) # y center point of contour
                moveData = [cx, cy, w, h]
    return moveData
#-----------------------------------------------------------------------------------------------
def playNotes( synthNow, octaveNow, moveData ):
    """
    Translate a detected motion position into sonic-pi notes and/or drums.

    synthNow  - current index into synthPicks (which synth is active)
    octaveNow - current index into octavePicks (which octave is active)
    moveData  - [cx, cy, w, h] center point and size of the motion contour

    Returns the (possibly hot-spot advanced) (synthNow, octaveNow) pair.
    Uses module-level menuLock/menuTime to debounce the on-screen hot spots
    for menuTimeout seconds after each activation.
    """
    global menuLock
    global menuTime
    # Get contour data for movement position
    x, y, w, h = moveData[0], moveData[1], moveData[2], moveData[3]
    if notePlayOn:
        # Map the motion point onto the horizontal/vertical note zone grid
        notexZone = int( x / notesHorizZone)
        noteyZone = int( y / notesVertZone )
        # Add entries to synthPicks array in config.py for available session synths
        if synthHotOn: # Screen Hot Spot Area changes synthPick if movement inside area
            # Top-left hot spot: advance to the next synth pick (debounced by menuLock)
            if ( x < synthHotxy[0] and y < synthHotxy[1] ) and not menuLock:
                menuLock = True
                menuTime = time.time()
                synthNow += 1
                if synthNow > len(synthPicks) - 1: # wrap around to first pick
                    synthNow = 0
        synthCur = synthList[synthPicks[synthNow]] # Select current synth from your synthPicks
        synthName = synthCur[1] # Get the synthName from synthCur
        use_synth(Synth(synthName)) # Activate the selected synthName
        # Add entries to octavePicks array in config.py for available session octaves
        if octaveHotOn: # Screen Hot Spot Area changes octavePick if movement inside area
            # Top-right hot spot: advance to the next octave pick (debounced by menuLock)
            if ( x > CAMERA_WIDTH - octaveHotxy[0] and y < octaveHotxy[1] ) and not menuLock:
                menuLock = True
                menuTime = time.time()
                octaveNow += 1
                if octaveNow > len(octavePicks) - 1: # wrap around to first pick
                    octaveNow = 0
        octaveCur = octaveList[octavePicks[octaveNow]] # Select current octave from your octavePicks
        octaveNotes = octaveCur[1] # Get the note list from octaveCur
        note1 = octaveNotes[notexZone]
        note2 = octaveNotes[noteyZone]
        if noteSleepVarOn: # Vary note sleep duration based on contour height
            noteDelay = h/float( CAMERA_HEIGHT/noteSleepMax )
            if (noteDelay < noteSleepMin):
                noteDelay = noteSleepMin
            elif (noteDelay > noteSleepMax):
                noteDelay = noteSleepMax
        else: # Set fixed note sleep duration
            noteDelay = noteSleepMin
        if noteDoubleOn: # Generate two notes based on contour x, y rather than one
            play(note1) # Based on x
            sleep(noteDelay)
            play(note2) # base on y
        else:
            play(note1)
            sleep(noteDelay)
        if verbose:
            if noteDoubleOn:
                print("Notes: zoneXY(%i,%i) moveXY(%i,%i) cArea(%i*%i)=%i" %
                      ( notexZone, noteyZone, x, y, w, h, w*h ))
                print(" Synth:%i %s Octave %i note1=%i note2=%i noteSleep=%.3f seconds" %
                      ( synthCur[0], synthName, octaveCur[0], note1, note2, noteDelay ))
            else:
                print("Note: zoneX(%i) moveXY(%i,%i) cArea(%i*%i)=%i" %
                      ( notexZone, x, y, w, h, w*h ))
                print(" Synth:%i %s Octave %i note1=%i noteSleep=%.3f seconds" %
                      ( synthCur[0], synthName, octaveCur[0], note1, noteDelay ))
    if drumPlayOn:
        # Map the motion point onto the drum zone grid
        drumxZone = int( x / drumHorizZone)
        drumyZone = int( y / drumVertZone )
        if drumSleepVarOn: # Vary note sleep duration based on contour height
            drumDelay = h/float( CAMERA_HEIGHT/drumSleepMax )
            if (drumDelay < drumSleepMin):
                drumDelay = drumSleepMin
            elif (drumDelay > drumSleepMax):
                drumDelay = drumSleepMax
        else: # Set fixed note sleep duration
            drumDelay = drumSleepMin
        # Drum hot spot not implemented yet (see drumHotxy above)
        # if drumHotOn:
        #     if ( x < drumHotxy[0] and y > synthHotxy[1] and y < synthHotxy[1] + drumHotxy[1] ) and not menuLock:
        #         menuLock = True
        #         menuTime = time.time()
        #         drumNow += 1
        #         if drumNow > len(drumPicks) - 1:
        #             drumNow = 0
        drum1 = drumList[drumPicks[drumxZone]][1]
        drum2 = drumList[drumPicks[drumyZone]][1]
        if drumDoubleOn: # Play two drum samples based on contour x, y rather than one
            sample(drum1)
            sleep(drumDelay)
            sample(drum2)
        else:
            sample(drum1)
            sleep(drumDelay)
        if verbose:
            if drumDoubleOn:
                print("Drums: zoneXY(%i,%i) moveXY(%i,%i) cArea(%i*%i)=%i" %
                      ( drumxZone, drumyZone, x, y, w, h, w*h ))
                print(" %i %s %i %s drumSleep=%.3f sec" %
                      ( drumList[drumPicks[drumxZone]][0], drum1,
                        drumList[drumPicks[drumyZone]][0], drum2, drumDelay ))
            else:
                print("Drum: zoneX(%i) moveXY(%i,%i) cArea(%i*%i)=%i" %
                      ( drumxZone, x, y, w, h, w*h ))
                print(" %i %s drumSleep=%.3f sec" %
                      ( drumList[drumPicks[drumxZone]][0], drum1, drumDelay ))
    if menuLock:
        # Re-enable the motion hot-spot menus once menuTimeout seconds elapse
        if (time.time() - menuTime > menuTimeout) :
            menuLock = False # unlock motion menu after two second
    return synthNow, octaveNow
#-----------------------------------------------------------------------------------------------
def sonicTrack():
    """
    Main motion-tracking loop.

    Reads frames from the global video stream vs, detects movement between
    consecutive grayscale frames via trackPoint(), and translates motion
    positions into sonic-pi notes/drums via playNotes().  Optionally shows
    an OpenCV status window with hot-spot overlays.  Runs until q is pressed
    in the window (windowOn) or the process is interrupted.
    """
    global menuLock
    global menuTime
    menuTime = time.time()
    menuLock = False
    if windowOn:
        print("press q to quit opencv display")
    else:
        print("press ctrl-c to quit")
    print("Start Motion Tracking ....")
    # initialize image1 using image2 (only done first time)
    image2 = vs.read()
    image1 = image2
    grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    still_scanning = True
    synthNow = 0 # Initialize first synth selection from synthPicks
    octaveNow = 0 # Initialize first octave selection from octavePicks
    # These Start Optional Threads for Target functions above
    if drumBassOn:
        bassThread = Thread(target=drumBass)
        bassThread.start()
    if drumSnareOn:
        snareThread = Thread(target=drumSnare)
        snareThread.start()
    if drumKickOn:
        kickThread = Thread(target=drumKick)
        kickThread.start()
    while still_scanning:
        image2 = vs.read()
        grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        moveData = trackPoint(grayimage1, grayimage2)
        grayimage1 = grayimage2 # current frame becomes the next comparison baseline
        if moveData: # Found Movement
            synthNow, octaveNow = playNotes(synthNow, octaveNow, moveData)
            if windowOn:
                cx = moveData[0]
                cy = moveData[1]
                # show small circle at motion location
                if SHOW_CIRCLE:
                    cv2.circle(image2,(cx,cy),CIRCLE_SIZE, cvGreen, LINE_THICKNESS)
                else:
                    # otherwise draw the motion bounding rectangle
                    cw = moveData[2]
                    ch = moveData[3]
                    cv2.rectangle(image2,(int(cx - cw/2),int(cy - ch/2)),(int(cx + cw/2), int(cy+ch/2)),
                                  cvGreen, LINE_THICKNESS)
        if windowOn:
            if notePlayOn:
                if synthHotOn: # Box top left indicating synthHotOn Area
                    cv2.rectangle(image2,(0,0), synthHotxy, cvBlue, LINE_THICKNESS)
                    synthText = synthList[synthPicks[synthNow]][1]
                    cv2.putText( image2, synthText, (5, int(synthHotxy[1]/2)),
                                 cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE , cvGreen, 1)
                if octaveHotOn: # Box top right indicating octave HotOn Area
                    cv2.rectangle(image2,(CAMERA_WIDTH - octaveHotxy[0], 0),
                                  (CAMERA_WIDTH - 1,octaveHotxy[1]), cvBlue, LINE_THICKNESS)
                    octaveText = ("octave %i" % octavePicks[octaveNow])
                    cv2.putText( image2, octaveText, (CAMERA_WIDTH - int(octaveHotxy[0] - 5), int(octaveHotxy[1]/2)),
                                 cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE , cvGreen, 1)
            if drumPlayOn and drumHotOn:
                # Draw the vertical drum zone grid lines
                for i in range ( drumHorizZone, CAMERA_WIDTH, drumHorizZone):
                    cv2.line( image2, (i, 0), (i,CAMERA_HEIGHT ), cvBlue, 1 )
                if drumDoubleOn:
                    # Draw the horizontal drum zone grid lines too
                    for i in range ( drumVertZone, CAMERA_HEIGHT, drumVertZone):
                        cv2.line( image2, (0, i), (CAMERA_WIDTH, i ), cvBlue, 1 )
            # NOTE(review): differenceImage and thresholdImage are locals of
            # trackPoint() and are NOT defined in this scope -- enabling
            # windowDiffOn or windowThreshOn in config.py raises NameError.
            # Fixing this would require trackPoint() to return those images.
            if windowDiffOn:
                cv2.imshow('Difference Image', differenceImage)
            if windowThreshOn:
                cv2.imshow('OpenCV Threshold', thresholdImage)
            if windowBigger > 1: # Note setting a bigger window will slow the FPS
                image2 = cv2.resize( image2,( big_w, big_h ))
            cv2.imshow('Movement Status (Press q in Window to Quit)', image2)
            # Close Window if q pressed while movement status window selected
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                vs.stop()
                print("End Motion Tracking")
                still_scanning = False
                quit()
#-----------------------------------------------------------------------------------------------
if __name__ == '__main__':
    try:
        while True:
            # Save Cam images frames to an in-program stream
            # Setup video stream on a processor Thread for faster speed
            if WEBCAM: # Start Web Cam stream (Note USB webcam must be plugged in)
                print("Initializing USB Web Camera ....")
                vs = WebcamVideoStream().start()
                vs.CAM_SRC = WEBCAM_SRC
                vs.CAM_WIDTH = WEBCAM_WIDTH
                vs.CAM_HEIGHT = WEBCAM_HEIGHT
                time.sleep(4.0) # Allow WebCam to initialize
            else:
                print("Initializing Pi Camera ....")
                vs = PiVideoStream().start()
                # Apply config.py orientation settings to the running camera
                vs.camera.rotation = CAMERA_ROTATION
                vs.camera.hflip = CAMERA_HFLIP
                vs.camera.vflip = CAMERA_VFLIP
                time.sleep(2.0) # Allow PiCamera to initialize
            # Run the tracking loop; it only returns on quit()/interrupt,
            # so the enclosing while True normally executes once.
            sonicTrack()
    except KeyboardInterrupt:
        # Clean shutdown of the camera thread on ctrl-c
        vs.stop()
        print("")
        print("+++++++++++++++++++++++++++++++++++")
        print("User Pressed Keyboard ctrl-c")
        print("%s %s - Exiting" % (progName, progVer))
        print("+++++++++++++++++++++++++++++++++++")
        print("")
        quit(0)