In [1]:
import urllib

import IPython.display
import librosa
import matplotlib.pyplot as plt
import numpy
import scipy
import sklearn
import sklearn.decomposition
import sklearn.preprocessing
import stanford_mir
%matplotlib inline
plt.rcParams['figure.figsize'] = (14, 5)

Principal Component Analysis

Download a file:

In [2]:
filename = '125_bounce.wav'  # audio file downloaded and analyzed in the cells below
In [3]:
url = 'http://audio.musicinformationretrieval.com/'
# urllib.urlretrieve exists only in Python 2; urllib.request.urlretrieve is
# the Python 3 equivalent. Support both so the notebook survives a kernel
# upgrade. Saves the remote file next to the notebook under `filename`.
try:
    from urllib import urlretrieve          # Python 2
except ImportError:
    from urllib.request import urlretrieve  # Python 3
urlretrieve(url + filename, filename=filename)
Out[3]:
('125_bounce.wav', <httplib.HTTPMessage instance at 0x109afb248>)

Load a file:

In [4]:
# Load the audio: x is the waveform samples, fs the sample rate.
# NOTE(review): librosa.load resamples to its default sample rate unless
# sr=None is passed — confirm the resampled rate is acceptable here.
x, fs = librosa.load(filename)

Listen to the signal:

In [5]:
# Render an in-notebook audio player for the loaded signal.
IPython.display.Audio(x, rate=fs)
Out[5]:

Compute some features:

In [6]:
# Compute MFCCs; shape is (n_mfcc, n_frames) — (20, 331) per the cell below.
# librosa >= 0.10 requires the signal as the keyword argument `y=`; the
# keyword form also works on older versions, so this is backward compatible.
X = librosa.feature.mfcc(y=x, sr=fs)
In [7]:
# Function-call form prints the same tuple under both Python 2 and Python 3;
# the original `print X.shape` statement is a syntax error on Python 3.
print(X.shape)
(20, 331)

Scale the features to have zero mean and unit variance:

In [8]:
# Standardize each MFCC coefficient (row) across frames: axis=1 gives every
# feature zero mean and unit variance. The default axis=0 would instead
# standardize each frame (column) — not what the narration intends, since
# the data points fed to PCA below are the columns (rows of X.T).
X = sklearn.preprocessing.scale(X, axis=1)
In [9]:
# Sanity check: the overall mean should now be ~0 (up to floating-point error).
X.mean()
Out[9]:
-4.2933095816320253e-18

Create a PCA model object.

In [10]:
# Keep two components for a 2-D scatter plot; whiten=True additionally
# rescales each projected component to unit variance.
model = sklearn.decomposition.PCA(n_components=2, whiten=True)

Apply PCA to the scaled features:

In [11]:
# Fit on X.T: scikit-learn expects samples as rows, and here each of the
# 331 frames (columns of X) is a data point.
model.fit(X.T)
Out[11]:
PCA(copy=True, n_components=2, whiten=True)
In [12]:
# Project the frames (rows of X.T) onto the two principal components.
Y = model.transform(X.T)
In [13]:
# Function-call form prints the same tuple under both Python 2 and Python 3;
# the original `print Y.shape` statement is a syntax error on Python 3.
print(Y.shape)
(331, 2)

Let's see how many principal components were returned:

In [14]:
# (n_components, n_features): two components over the 20 MFCC dimensions.
model.components_.shape
Out[14]:
(2, 20)

Plot the top two principal components for each data point:

In [15]:
# Scatter of every frame in the 2-D principal-component space.
# Label the axes so the figure stands alone when the notebook is skimmed.
plt.scatter(Y[:, 0], Y[:, 1])
plt.xlabel('Principal component 1')
plt.ylabel('Principal component 2')
Out[15]:
<matplotlib.collections.PathCollection at 0x10aebbc10>