[wip]
parent 266a31f0bb
commit 678aae0c94

README.md (34 changed lines)

@@ -1,10 +1,42 @@
# Redilysis = Redis + Audio Analysis

Redilysis sends audio analysis to a Redis install. What's the use? Using that information for multiple visualizations, of course!

Redilysis sends audio analysis to a Redis server.

The idea is to share a single audio analysis with many Visual Jockey filters, in our case for lasers.

Two modes exist for now; you need to run two processes to get the complete experience, as shown below.
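
For example (a hypothetical invocation, assuming a local Redis server and the defaults defined in `redilysis.py`), you could run `python redilysis.py --mode spectrum` in one terminal and `python redilysis.py --mode bpm` in another.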

### Spectrum Mode

This is the default mode.

It performs frequency analysis (a Fast Fourier Transform) to detect "energy" across the bandwidths of human hearing.

It records whether there is sound and at which frequencies.

It can run at a sub-second interval (100 ms) with no problem.

It reports realistic data: spectrum analysis is the easy part.
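
As a rough sketch of the idea (illustrative only, not the exact code in `redilysis.py`; the `band_energies` helper is made up for this example), the analysis boils down to a real FFT followed by a mel-filter reduction into a handful of bands:

```python
# Minimal sketch of the spectrum analysis, assuming a mono float32 buffer
# `audio_data` sampled at 44100 Hz. Constants mirror redilysis.py, but the
# band_energies() helper itself is illustrative and not part of the script.
import numpy
import librosa

RATE = 44100             # sample rate
N_FFT = 4096             # FFT size
BAND_RANGE = 7           # number of mel bands reported
ENERGY_THRESHOLD = 0.1   # per-band activity threshold

mel_filter = librosa.filters.mel(sr=RATE, n_fft=N_FFT, n_mels=BAND_RANGE)

def band_energies(audio_data):
    """Return per-band mel energies and a 0/1 activity flag for each band."""
    spectrum = numpy.abs(numpy.fft.rfft(audio_data, n=N_FFT))  # magnitude spectrum
    bands = mel_filter.dot(spectrum)                           # collapse onto mel bands
    flags = [1 if value > ENERGY_THRESHOLD else 0 for value in bands]
    return bands, flags
```

In `redilysis.py` the per-band values, plus the index of the loudest band, are what end up in the `spectrum` and `tuning` keys.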

### BPM Mode

This mode is more experimental.

It attempts to detect beats based on librosa's onset detection and beat tracking.
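
Under the hood it relies on librosa. A minimal sketch (the `estimate_beats` wrapper is illustrative; the real work happens in `m_bpm()` in `redilysis.py`):

```python
# Illustrative wrapper around librosa's onset detection and beat tracker,
# mirroring what m_bpm() does in redilysis.py; estimate_beats() itself is
# not part of the script.
import librosa

RATE = 44100

def estimate_beats(audio_data, previous_bpm=120.0):
    onsets = librosa.onset.onset_detect(y=audio_data, sr=RATE)
    bpm, beats = librosa.beat.beat_track(
        y=audio_data,
        sr=RATE,
        trim=False,
        start_bpm=previous_bpm,  # seed the tracker with the last estimate
        units="time",            # report beat positions in seconds
    )
    return onsets, bpm, beats
```

Feeding the previous estimate back in as `start_bpm` is what the `bpm` global in the script is for.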

## Keys and contents in Redis

* `bpm_time` : (milliseconds integer timestamp) last update time of the BPM analysis
* `onset` : (JSON list) detected onset positions
* `bpm` : (JSON number) estimated tempo in beats per minute
* `beats` : (JSON list) detected beat positions
* `spectrum_time` : (milliseconds integer timestamp) last update time of the spectrum analysis
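
Any visualization can read those keys back with a plain Redis client. A minimal consumer sketch (assuming a Redis server on 127.0.0.1:6379 and the Python `redis` package; the polling loop is purely illustrative):

```python
# Illustrative consumer: poll the keys redilysis writes and print them.
# Assumes Redis on 127.0.0.1:6379; this is not part of redilysis itself.
import json
import time

import redis

r = redis.Redis(host="127.0.0.1", port=6379, decode_responses=True)

while True:
    bpm = json.loads(r.get("bpm") or "0")       # estimated tempo
    beats = json.loads(r.get("beats") or "[]")  # beat positions
    bpm_time = r.get("bpm_time")                # last update timestamp (ms)
    print("bpm={} beats={} updated_at={}".format(bpm, len(beats), bpm_time))
    time.sleep(0.5)
```

The spectrum-mode keys (`rms`, `spectrum`, `tuning`) can be read the same way.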

## Installation

```bash
sudo apt install python-pyaudio python3
git clone https://git.interhacker.space/tmplab/redilysis.git
cd redilysis
pip install -r requirements.txt
```

redilysis.py (113 changed lines, Normal file → Executable file)

@@ -7,6 +7,7 @@ For more examples using PyAudio:
https://github.com/mwickert/scikit-dsp-comm/blob/master/sk_dsp_comm/pyaudio_helper.py
"""

from __future__ import print_function
import argparse
import json
import librosa
@@ -14,43 +15,40 @@ import numpy
import os
import pyaudio
import redis
import sys
import time

def debug(*args, **kwargs):
    if( verbose == False ):
        return
    print(*args, file=sys.stderr, **kwargs)


def list_devices():
    # List all audio input devices
    p = pyaudio.PyAudio()
    i = 0
    n = p.get_device_count()
    print("\nFound {} devices\n".format(n))
    print (" {} {}".format('ID', 'Device name'))
    while i < n:
        dev = p.get_device_info_by_index(i)
        if dev['maxInputChannels'] > 0:
            print (" {} {}".format(i, dev['name']))
        i += 1

# Define default variables.
_BAND_RANGE = 96
_BAND_RANGE = 7
_CHANNELS = 1
_ENERGY_THRESHOLD = 0.4
_ENERGY_THRESHOLD = 0.1
_FRAMES_PER_BUFFER = 4410
_N_FFT = 4096
_RATE = 44100
_SAMPLING_FREQUENCY = 0.1


# Argument parsing
# Audio Args
parser = argparse.ArgumentParser(prog='realtime_redis')
parser.add_argument('--list-devices','-L', action='store_true', help='Which devices are detected by pyaudio')
parser.add_argument('--mode','-m', required=False, default='spectrum', choices=['spectrum', 'bpm'], type=str, help='Which mode to use. Default=spectrum')
parser.add_argument('--device','-d', required=False, type=int, help='Which pyaudio device to use')
parser.add_argument('--frames','-f', required=False, default=4410, type=int, help='How many frames per buffer. Default={}'.format(_FRAMES_PER_BUFFER))
#parser.add_argument('--frames','-f', required=False, default=4410, type=int, help='How many frames per buffer. Default={}'.format(_FRAMES_PER_BUFFER))
parser.add_argument('--sampling-frequency','-s', required=False, default=0.1, type=float, help='Which frequency, in seconds. Default={} '.format(_SAMPLING_FREQUENCY))
parser.add_argument('--channels','-c', required=False, default=_CHANNELS, type=int, help='How many channels. Default={} '.format(_CHANNELS))
parser.add_argument('--rate','-r', required=False, default=44100, type=int, help='Which rate. Default={} '.format(_RATE))
parser.add_argument('--energy-threshold','-e', required=False, default=0.4, type=float, help='Which energy triggers spectrum detection flag. Default={} '.format(_ENERGY_THRESHOLD))
# Redis Args
parser.add_argument("-i","--ip",help="IP address of the Redis server ",default="127.0.0.1",type=str)
parser.add_argument("-p","--port",help="Port of the Redis server ",default="6379",type=str)
# Standard Args
parser.add_argument("-v","--verbose",action="store_true",help="Verbose")
args = parser.parse_args()

# Set real variables
@@ -58,61 +56,85 @@ BAND_RANGE = _BAND_RANGE
CHANNELS = args.channels
DEVICE = args.device
ENERGY_THRESHOLD = args.energy_threshold
FRAMES_PER_BUFFER = args.frames
FRAMES_PER_BUFFER = int(args.rate * args.sampling_frequency )
LIST_DEVICES = args.list_devices
MODE = args.mode
N_FFT = _N_FFT
RATE = args.rate
SAMPLING_FREQUENCY = args.sampling_frequency
ip = args.ip
port = args.port
verbose = args.verbose

debug( "frames", FRAMES_PER_BUFFER)
if( MODE == "bpm" and SAMPLING_FREQUENCY < 0.5 ):
    debug( "You should use a --sampling-frequency superior to 0.5 in BPM mode...")


# Define the frequency range of the log-spectrogram.
F_LO = librosa.note_to_hz('C2')
F_HI = librosa.note_to_hz('C9')
M = librosa.filters.mel(RATE, N_FFT, BAND_RANGE, fmin=F_LO, fmax=F_HI)
melFilter = librosa.filters.mel(RATE, N_FFT, BAND_RANGE, fmin=F_LO, fmax=F_HI)


r = redis.Redis(
    host='localhost',
    port=6379)
    host=ip,
    port=port)

# Early exit to list devices
# As it may crash later if not properly configured
#
def list_devices():
    # List all audio input devices
    p = pyaudio.PyAudio()
    i = 0
    n = p.get_device_count()
    debug("\nFound {} devices\n".format(n))
    debug (" {} {}".format('ID', 'Device name'))
    while i < n:
        dev = p.get_device_info_by_index(i)
        if dev['maxInputChannels'] > 0:
            debug (" {} {}".format(i, dev['name']))
        i += 1
if( LIST_DEVICES ):
    list_devices()
    os._exit(1)

p = pyaudio.PyAudio()

# global
bpm = 120.0

def m_bpm(audio_data):
    """
    This function saves slow analysis to redis
    * onset
    * bpm
    * beat
    """
    global bpm

    # Get RMS
    rms = librosa.feature.rmse( audio_data )

    if( bpm <= 10):
        bpm = 10
    onset = librosa.onset.onset_detect(
        y=audio_data,
        sr=RATE)
    new_bpm, beats = librosa.beat.beat_track(
        y=audio_data,
        sr=RATE,
        trim=False,
        start_bpm=bpm,
        units="time"
    )

    # Save to Redis
    r.set( 'onset', json.dumps( onset.tolist() ) )
    r.set( 'bpm', json.dumps( new_bpm ) )
    r.set( 'beats', json.dumps( beats.tolist() ) )
    bpm = new_bpm
    debug( "bpm:{} onset:{} beats:{}".format(bpm,onset,beats) )
    return True

def m_spectrum(audio_data):
@@ -120,13 +142,14 @@ def m_spectrum(audio_data):
    """
    This function saves fast analysis to redis
    * spectrum
    * RMS
    * tuning
    """

    # Compute real FFT.
    x_fft = numpy.fft.rfft(audio_data, n=N_FFT)
    fft = numpy.fft.rfft(audio_data, n=N_FFT)

    # Compute mel spectrum.
    melspectrum = M.dot(abs(x_fft))
    melspectrum = melFilter.dot(abs(fft))

    # Get RMS
    rms = librosa.feature.rmse( S=melspectrum, frame_length=FRAMES_PER_BUFFER )
@@ -134,13 +157,10 @@ def m_spectrum(audio_data):
    # Initialize output characters to display.
    bit_list = [0]*BAND_RANGE
    count = 0

    highest_index = -1
    highest_value = 0

    for i in range(BAND_RANGE):
        val = melspectrum[i]

        # If this is the highest tune, record it
        if( val > highest_value ) :
            highest_index = i
@@ -149,9 +169,10 @@ def m_spectrum(audio_data):
        # If there is energy in this frequency, mark it
        if val > ENERGY_THRESHOLD:
            count += 1
            bit_list[i] = 1
            bit_list[i] = val

    # Save to redis
    debug( 'rms:{} bit_list:{} highest_index:{}'.format(rms , bit_list, highest_index ))
    r.set( 'rms', "{}".format(rms.tolist()) )
    r.set( 'spectrum', json.dumps( bit_list ) )
    r.set( 'tuning', highest_index )
@@ -167,14 +188,18 @@ def callback(in_data, frame_count, time_info, status):
    elif MODE == 'bpm':
        m_bpm( audio_data)
    else:
        print( "Unknown mode. Exiting")
        debug( "Unknown mode. Exiting")
        os._exit(2)
    end = time.time()
    print ("\rLoop took {:.2}s on {}s ".format(end - start, SAMPLING_FREQUENCY), end="")
    # debug ("\rLoop took {:.2}s on {}s ".format(end - start, SAMPLING_FREQUENCY))
    return (in_data, pyaudio.paContinue)


print( "\n\nRunning! Using mode {}.\n\n".format(MODE))
debug( "\n\nRunning! Using mode {}.\n\n".format(MODE))
if MODE == 'spectrum':
    debug("In this mode, we will set keys: rms, spectrum, tuning")
elif MODE == 'bpm':
    debug("In this mode, we will set keys: onset, bpm, beats")

stream = p.open(format=pyaudio.paFloat32,
                channels=CHANNELS,

requirements.txt

@@ -1,5 +1,2 @@
Redilysis
librosa=0.6.1
numpy=1.14.2
pyaudio
librosa==0.6.1
redis