redilysis/redilysis.py

#! /usr/bin/python2.7
"""
Sends live audio analysis to a Redis server.
Based on musicinformationretrieval.com/realtime_spectrogram.py
For more examples using PyAudio:
https://github.com/mwickert/scikit-dsp-comm/blob/master/sk_dsp_comm/pyaudio_helper.py
"""
from __future__ import print_function
import argparse
import json
import librosa
import math
import numpy
import os
import pyaudio
import redis
import statistics
import sys
import time

def debug(*args, **kwargs):
    if not verbose:
        return
    print(*args, file=sys.stderr, **kwargs)
# Define default variables.
BAND_OCTAVES = 10 # octaves from C0 to C10
_BAND_TONES = BAND_OCTAVES * 12 # octaves * notes per octave
_CHANNELS = 1
_FRAMES_PER_BUFFER = 4410
_N_FFT = 4096
_RATE = 44100
_SAMPLING_FREQUENCY = 0.1
_BPM_MIN = 10
_BPM_MAX = 400
# Argument parsing
parser = argparse.ArgumentParser(prog='realtime_redis')
# Standard Args
parser.add_argument("-v","--verbose",action="store_true",help="Verbose")
# Redis Args
parser.add_argument("-i","--ip",help="IP address of the Redis server",default="127.0.0.1",type=str)
parser.add_argument("-p","--port",help="Port of the Redis server",default=6379,type=int)
# Audio Capture Args
parser.add_argument('--list-devices','-L', action='store_true', help='List the audio devices detected by pyaudio and exit')
parser.add_argument('--mode','-m', required=False, default='spectrum', choices=['spectrum', 'bpm'], type=str, help='Which mode to use. Default=spectrum')
parser.add_argument('--device','-d', required=False, type=int, help='Which pyaudio device to use')
parser.add_argument('--sampling-frequency','-s', required=False, default=_SAMPLING_FREQUENCY, type=float, help='Sampling interval in seconds. Default={}'.format(_SAMPLING_FREQUENCY))
parser.add_argument('--channels','-c', required=False, default=_CHANNELS, type=int, help='How many channels. Default={}'.format(_CHANNELS))
parser.add_argument('--rate','-r', required=False, default=_RATE, type=int, help='The audio capture rate in Hz. Default={}'.format(_RATE))
parser.add_argument('--frames','-f', required=False, default=_FRAMES_PER_BUFFER, type=int, help='How many frames per buffer. Default={}'.format(_FRAMES_PER_BUFFER))
# BPM Mode Args
parser.add_argument('--bpm-min', required=False, default=_BPM_MIN, type=int, help='BPM mode only. The low BPM threshold. Default={}'.format(_BPM_MIN))
parser.add_argument('--bpm-max', required=False, default=_BPM_MAX, type=int, help='BPM mode only. The high BPM threshold. Default={}'.format(_BPM_MAX))
args = parser.parse_args()
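
# Example invocations (illustrative only; the device index is an assumption,
# run --list-devices first to find a capture device on your machine):
#   python redilysis.py --list-devices
#   python redilysis.py --mode spectrum --device 2 --sampling-frequency 0.1
#   python redilysis.py --mode bpm --device 2 --sampling-frequency 1.0 --ip 127.0.0.1 --port 6379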
# Global state, updated by the audio callback
bpm = 120.0
start = 0
# Set real variables
F_LO = librosa.note_to_hz('C0')
F_HI = librosa.note_to_hz('C10')
BAND_TONES = _BAND_TONES
N_FFT = _N_FFT
CHANNELS = args.channels
DEVICE = args.device
FRAMES_PER_BUFFER = int(args.rate * args.sampling_frequency)  # derived from rate and interval; the --frames argument is not used here
LIST_DEVICES = args.list_devices
MODE = args.mode
RATE = args.rate
SAMPLING_FREQUENCY = args.sampling_frequency
bpm_min = args.bpm_min
bpm_max = args.bpm_max
ip = args.ip
port = args.port
verbose = args.verbose
melFilter = librosa.filters.mel(RATE, N_FFT, BAND_TONES, fmin=F_LO, fmax=F_HI)
r = redis.Redis(
    host=ip,
    port=port)
# Early exit to list devices,
# since the capture setup below may crash if a device is not properly configured
#
def list_devices():
    # List all audio input devices
    p = pyaudio.PyAudio()
    i = 0
    n = p.get_device_count()
    print("\nFound {} devices\n".format(n))
    print(" {} {}".format('ID', 'Device name'))
    while i < n:
        dev = p.get_device_info_by_index(i)
        if dev['maxInputChannels'] > 0:
            print(" {} {}".format(i, dev['name']))
        i += 1

if LIST_DEVICES:
    list_devices()
    os._exit(1)

def m_bpm(audio_data):
    """
    This function saves slow analysis to redis
    * bpm
    * beats
    """
    global bpm
    global start
    # Detect tempo / bpm
    new_bpm, beats = librosa.beat.beat_track(
        y=audio_data,
        sr=RATE,
        trim=False,
        #start_bpm=bpm,
        units="time"
    )
    '''
    new_bpm = librosa.beat.tempo(y=audio_data, sr=RATE)[0]
    '''
    # Correct a possible octave error
    if new_bpm < bpm_min or new_bpm > bpm_max:
        found = False
        octaveErrorList = [0.5, 2, 0.3333, 3]
        for key, factor in enumerate(octaveErrorList):
            correction = new_bpm * factor
            if bpm_min < correction < bpm_max:
                debug("Corrected high/low bpm:{} to:{}".format(new_bpm, correction))
                new_bpm = correction
                found = True
                break
        if not found:
            if new_bpm < bpm_min:
                new_bpm = bpm_min
            else:
                new_bpm = bpm_max
    debug("new_bpm:{}".format(new_bpm))
    '''
    How to guess the next beats based on the data sent to redis

    ~~ A Dirty Graph ~~

                          |start                end|
    Capture               |........................|
    BPM detect+Redis set                           ||
    Client Redis get                                              |
    Time                  |........................||.............|
                           ---SAMPLING_FREQUENCY----
                                                    - < TIME-START
    Read Delay                         --------------- < 2*SAMPLING_FREQUENCY - PTTL
    Delay                 -----------------------------------------
    Beats                 |last beat
      . known             ...b....b....b....b....b.
      . passed            (...b....b....b.)
      . guessed                        (..b....b....b....b...
    Next Beat Calculation b....b....b....b.|..b

    Beats                 |last beat
                          0    1    2    3    4

    Redis:
      key     bpm_sample_interval
      visual  |........................|
      key     bpm_delay
      visual  |.........................|
    '''
    bpm = new_bpm
    bpm_sample_interval = SAMPLING_FREQUENCY * 1000
    bpm_delay = (SAMPLING_FREQUENCY + time.time() - start) * 1000
    pexpireat = int(2 * bpm_sample_interval)
    # Save to Redis
    r.set('bpm', round(bpm, 2), px=pexpireat)
    r.set('bpm_sample_interval', bpm_sample_interval)
    r.set('bpm_delay', bpm_delay)
    r.set('beats', json.dumps(beats.tolist()))
    #debug("pexpireat:{}".format(pexpireat))
    debug("bpm:{} bpm_delay:{} bpm_sample_interval:{} beats:{}".format(bpm, bpm_delay, bpm_sample_interval, beats))
    return True
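
# --- Illustrative client-side sketch (not part of the original script) ------
# The diagram above describes how a consumer can extrapolate upcoming beats
# from the keys written by m_bpm(). This is a minimal, hedged example of that
# idea: the function name and its simplifications (it ignores the extra read
# latency that the PTTL of the 'bpm' key is meant to expose) are assumptions.
def example_next_beat_offset(redis_client):
    # Beat positions in seconds, relative to the start of the capture window.
    beats = json.loads(redis_client.get('beats') or '[]')
    tempo = float(redis_client.get('bpm') or 120.0)
    # Milliseconds between the start of that window and the moment the keys
    # were written, converted to seconds.
    delay_s = float(redis_client.get('bpm_delay') or 0.0) / 1000.0
    if not beats:
        return None
    period = 60.0 / tempo
    # Project the last detected beat forward until it passes "now".
    t = beats[-1]
    while t < delay_s:
        t += period
    # Seconds from the moment the keys were written until the predicted beat.
    return t - delay_s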

def m_spectrum(audio_data):
    """
    This function saves fast analysis to redis
    """
    # Compute real FFT.
    fft = numpy.fft.rfft(audio_data, n=N_FFT)
    # Compute mel spectrum.
    melspectrum = melFilter.dot(abs(fft))
    # Initialize the output band lists.
    spectrum_120 = [0] * BAND_TONES
    spectrum_10 = [0] * BAND_OCTAVES
    spectrum_oct = [[] for i in range(BAND_OCTAVES)]
    # Assign values: one band per semitone, grouped by octave.
    for i in range(BAND_TONES):
        val = round(melspectrum[i], 2)
        spectrum_120[i] = val
        key = int(math.floor(i / 12))
        spectrum_oct[key].append(val)
    for i in range(BAND_OCTAVES):
        spectrum_10[i] = round(sum(spectrum_oct[i]) / len(spectrum_oct[i]), 2)
    # Get RMS
    #rms = librosa.feature.rms(S=melspectrum)
    rms = librosa.feature.rms(y=audio_data).tolist()[0]
    rms_avg = round(sum(rms) / len(rms), 2)
    # Save to redis
    #debug('spectrum_120:{}'.format(spectrum_120))
    #debug('spectrum_10:{}'.format(spectrum_10))
    #debug('rms:{}'.format(rms_avg))
    if len(spectrum_120): r.set('spectrum_120', json.dumps(spectrum_120))
    if len(spectrum_10): r.set('spectrum_10', json.dumps(spectrum_10))
    if rms: r.set('rms', "{}".format(rms_avg))
    return True
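
# --- Illustrative client-side sketch (not part of the original script) ------
# A consumer could read the keys written by m_spectrum() like this; the
# variable names are assumptions, the key names match the r.set() calls above.
#   bands_120 = json.loads(r.get('spectrum_120') or '[]')  # one value per semitone band
#   bands_10  = json.loads(r.get('spectrum_10') or '[]')   # one value per octave
#   loudness  = float(r.get('rms') or 0.0)                 # average RMS of the last window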

def callback(in_data, frame_count, time_info, status):
    audio_data = numpy.fromstring(in_data, dtype=numpy.float32)
    global start
    start = time.time()
    if MODE == 'spectrum':
        m_spectrum(audio_data)
    elif MODE == 'bpm':
        m_bpm(audio_data)
    else:
        debug("Unknown mode. Exiting")
        os._exit(2)
    end = time.time()
    debug("\rLoop took {:.2}s on {}s ".format(end - start, SAMPLING_FREQUENCY))
    return (in_data, pyaudio.paContinue)

debug("\n\nRunning! Using mode {}.\n\n".format(MODE))
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
                channels=CHANNELS,
                rate=RATE,
                input=True,    # Do record input.
                output=False,  # Do not play back output.
                frames_per_buffer=FRAMES_PER_BUFFER,
                input_device_index=DEVICE,
                stream_callback=callback)
stream.start_stream()
while stream.is_active():
    time.sleep(SAMPLING_FREQUENCY)
stream.stop_stream()
stream.close()
p.terminate()