Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove deprecated syntax and add workflow #346

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions .github/workflows/python-package.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python package

# Trigger CI for every push and pull request that targets master.
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      # Run the job once per interpreter version listed below.
      matrix:
        # NOTE(review): unquoted versions parse as YAML floats (a future "3.10"
        # would silently become 3.1) — quoting each entry is safer.
        python-version: [3.7, 3.8, 3.9]

    steps:
      # NOTE(review): actions/checkout@v2 and actions/setup-python@v2 run on
      # deprecated Node runtimes; consider bumping to checkout@v4 / setup-python@v5
      # (verify Python 3.7 availability on the current ubuntu-latest image first).
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest
          # Project requirements are optional: install only if the file exists.
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      # NOTE(review): pytest is installed above but never invoked — the header
      # comment promises "run tests"; a `pytest` step appears to be missing.
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
4 changes: 3 additions & 1 deletion pyAudioAnalysis/ShortTermFeatures.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import matplotlib.pyplot as plt
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
from scikits.talkbox import lpc
from tqdm import tqdm

eps = sys.float_info.epsilon
Expand Down Expand Up @@ -604,6 +605,7 @@ def feature_extraction(signal, sampling_rate, window, step, deltas=True):
feature_names = feature_names_2

features = []
feature_vector_prev = None
# for each short-term window to end of signal
while current_position + window - 1 < number_of_samples:
count_fr += 1
Expand Down Expand Up @@ -669,7 +671,7 @@ def feature_extraction(signal, sampling_rate, window, step, deltas=True):
features.append(feature_vector)
else:
# delta features
if count_fr > 1:
if count_fr > 1 and feature_vector_prev:
delta = feature_vector - feature_vector_prev
feature_vector_2 = np.concatenate((feature_vector, delta))
else:
Expand Down
4 changes: 2 additions & 2 deletions pyAudioAnalysis/data/recordRadio.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,9 +110,9 @@ def recordStation(stationName, outputName, sleepTime = -1, Listen = False):
r.listen = Listen
r.start()

print r.bus()
print(r.bus())
if sleepTime<=0:
raw_input('Press [Enter] to stop')
input('Press [Enter] to stop')
else:
time.sleep(sleepTime)
r.stop()
Expand Down
24 changes: 16 additions & 8 deletions pyAudioAnalysis/data/testComputational.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,31 +16,35 @@ def main(argv):
t1 = time.time()
F = MidTermFeatures.short_term_feature_extraction(x, Fs, 0.050 * Fs, 0.050 * Fs);
t2 = time.time()
perTime1 = duration / (t2-t1); print "short-term feature extraction: {0:.1f} x realtime".format(perTime1)
perTime1 = duration / (t2-t1)
print("short-term feature extraction: {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-classifyFile":
for i in range(nExp):
[Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.time()
aT.file_classification("diarizationExample.wav", "svmSM", "svm")
t2 = time.time()
perTime1 = duration / (t2-t1); print "Mid-term feature extraction + classification \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration / (t2-t1)
print("Mid-term feature extraction + classification \t {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-mtClassify":
for i in range(nExp):
[Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.time()
[flagsInd, classesAll, acc] = aS.mid_term_file_classification("diarizationExample.wav", "svmSM", "svm", False, '')
t2 = time.time()
perTime1 = duration / (t2-t1); print "Fix-sized classification - segmentation \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration / (t2-t1)
print("Fix-sized classification - segmentation \t {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-hmmSegmentation":
for i in range(nExp):
[Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.time()
aS.hmm_segmentation('diarizationExample.wav', 'hmmRadioSM', False, '')
t2 = time.time()
perTime1 = duration / (t2-t1); print "HMM-based classification - segmentation \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration / (t2-t1)
print("HMM-based classification - segmentation \t {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-silenceRemoval":
for i in range(nExp):
[Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav");
Expand All @@ -49,31 +53,35 @@ def main(argv):
[Fs, x] = audioBasicIO.read_audio_file("diarizationExample.wav");
segments = aS.silence_removal(x, Fs, 0.050, 0.050, smooth_window= 1.0, Weight = 0.3, plot = False)
t2 = time.time()
perTime1 = duration / (t2-t1); print "Silence removal \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration / (t2-t1)
print("Silence removal \t {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-thumbnailing":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.read_audio_file("scottish.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.time()
[A1, A2, B1, B2, Smatrix] = aS.music_thumbnailing(x1, Fs1, 1.0, 1.0, 15.0) # find thumbnail endpoints
t2 = time.time()
perTime1 = duration1 / (t2-t1); print "Thumbnail \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration1 / (t2-t1)
print("Thumbnail \t {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-diarization-noLDA":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.read_audio_file("diarizationExample.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.time()
aS.speaker_diarization("diarizationExample.wav", 4, LDAdim = 0, PLOT = False)
t2 = time.time()
perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration1 / (t2-t1)
print("Diarization \t {0:.1f} x realtime".format(perTime1))
elif argv[1] == "-diarization-LDA":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.read_audio_file("diarizationExample.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.time()
aS.speaker_diarization("diarizationExample.wav", 4, PLOT = False)
t2 = time.time()
perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1)
perTime1 = duration1 / (t2-t1)
print("Diarization \t {0:.1f} x realtime".format(perTime1))

# Script entry point: forward the raw CLI argument vector (including argv[0])
# to main(), which dispatches on argv[1] (e.g. "-classifyFile", "-mtClassify").
if __name__ == '__main__':
    main(sys.argv)
Expand Down