I needed to generate some correlated variables to test correlation coefficients, similar to the data sets in this Wikipedia article. I adapted the example code from the usage instructions for the MINE library in Python.
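The plotting code below relies on generator functions from that adapted code (`makeMvNormal`, `makeRotNormal`, `makeOtherDist`), each yielding dicts with a `'name'` string and a `'data'` array of shape (n, 2). As a minimal sketch of the kind of generator involved (an assumption about its shape, not the original code), `makeMvNormal` might look like this:

```python
import numpy as np

def makeMvNormal(n=800, correlations=(1.0, 0.8, 0.4, 0.0, -0.4, -0.8, -1.0)):
    # One bivariate normal sample per correlation coefficient,
    # packaged the way mySubPlot below expects.
    for rho in correlations:
        cov = [[1.0, rho], [rho, 1.0]]
        data = np.random.multivariate_normal([0, 0], cov, n)
        yield {'name': 'Correlation %.1f' % rho, 'data': data}
```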

%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def mySubPlot(plotData,
              numRows,
              numCols,
              plotNum,
              xlim=(-4, 4),
              ylim=(-4, 4)):

    ax = plt.subplot(numRows, numCols, plotNum, xlim=xlim, ylim=ylim)
    ax.set_title(plotData['name'],fontsize=8)
    ax.set_frame_on(False)
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    ax.plot(plotData['data'][:, 0], plotData['data'][:, 1], ',')
    ax.set_xticks([])
    ax.set_yticks([])
    return ax

plotLims = {'Polynomial \nwith uniform noise': [(-1, 1), (-1/3, 1+1/3)],
            'Uniform rotated pi/8' :[(-np.sqrt(2+np.sqrt(2)) / np.sqrt(2), np.sqrt(2+np.sqrt(2)) / np.sqrt(2)), (-np.sqrt(2+np.sqrt(2)) / np.sqrt(2), np.sqrt(2+np.sqrt(2)) / np.sqrt(2))],
            'Uniform rotated 2*pi/8' : [(-np.sqrt(2), np.sqrt(2)), (-np.sqrt(2), np.sqrt(2))],
            'Square \nwith uniform noise':[(-1, 1), (-1, 3)],
            'Square with noise \nrandomly reflected in x':[(-1.5, 1.5), (-1.5, 1.5)],
            'Sin x \ncos x with noise':[(-1.5, 1.5), (-1.5, 1.5)],
            'Mixture of 4 normals': [(-7, 7), (-7, 7)]
           }


plt.figure(facecolor='white', figsize=(12, 9))
for i, data in enumerate(makeMvNormal(n=800)):
    mySubPlot(data, 3, 7, i + 1)

for i, data in enumerate(makeRotNormal(n=200)):
    mySubPlot(data, 3, 7, i + 8)

for i, data in enumerate(makeOtherDist(n=800)):
    mySubPlot(data, 3, 7, i + 15, plotLims[data['name']][0], plotLims[data['name']][1])

plt.tight_layout()
plt.show()

[Figure: grid of scatter plots for the generated distributions]

Gaussian mixture models in PyMC.

%matplotlib inline
import numpy as np
import seaborn as sb
import math
import matplotlib.pyplot as plt
import pandas as pd

Generate and plot some sample data.

params = [
    (150, 10, 0.7),
    (200, 20, 0.3)
]

no_samples = 1000
dist = [np.random.normal(mu, sigma, math.floor(mixing_prob * no_samples)) for (mu, sigma, mixing_prob) in params]
xs = np.array([item for sublist in dist for item in sublist])
sb.distplot(xs)

[Figure: histogram and KDE of the sampled mixture data]

Use a categorical variable to model the assignment of each observation to a cluster; p represents the mixing probability.

import pymc as pm
p = pm.Uniform("p", 0, 1)
assignment = pm.Categorical("assignment", [p, 1 - p], size = xs.shape[0])

Model the standard deviations as uniform on [0, 100] and the centers as normally distributed around the peaks visible in the histogram. Pick the precision to be sufficiently vague.

taus = 1.0 / pm.Uniform("stds", 0, 100, size = 2) **2
centers = pm.Normal("centers", [145, 210], [0.01, 0.02], size = 2)
@pm.deterministic
def center_i(assignment=assignment, centers=centers):
    return centers[assignment]

@pm.deterministic
def tau_i(assignment=assignment, taus=taus):
    return taus[assignment]

observations = pm.Normal("obs", center_i, tau_i, value=xs, observed=True)

model = pm.Model([p, assignment, observations, taus, centers])

Use MCMC to fit the parameters of the model.

mcmc = pm.MCMC(model)
mcmc.sample(40000, 10000)   # 40,000 samples, discarding the first 10,000 as burn-in
mcmc.sample(10000)          # draw a further 10,000 samples
 [-----------------100%-----------------] 10000 of 10000 complete in 5.9 sec
pm.Matplot.plot(mcmc)
Plotting p
Plotting centers_0
Plotting centers_1
Plotting stds_0
Plotting stds_1

[Figures: trace and posterior plots for p, centers, and stds]

#print(mcmc.stats())
#mcmc.stats()['stds']['95% HPD interval']
center_trace = mcmc.trace("centers")[:]

centers = [['Center 1', x] for x in center_trace[:, 0]]
centers += [['Center 2', x] for x in center_trace[:, 1]]

df = pd.DataFrame(data = centers)
df.columns = ['Posterior', 'Value']

g = sb.FacetGrid(df, col= 'Posterior')
g.map(plt.hist, 'Value');

[Figure: posterior histograms for Center 1 and Center 2]

std_trace = mcmc.trace("stds")[:]

stds = [['Std 1', x] for x in std_trace[:, 0]]
stds += [['Std 2', x] for x in std_trace[:, 1]]

df = pd.DataFrame(data = stds)
df.columns = ['Std', 'Value']

g = sb.FacetGrid(df, col= 'Std')
g.map(plt.hist, 'Value');

[Figure: posterior histograms for Std 1 and Std 2]

Use scikit-learn’s Gaussian mixture model to compare the results.

from sklearn import mixture

xs1 = np.array([[x] for x in xs])   # reshape to a column of single-feature samples
m = mixture.GMM(2).fit(xs1)         # GMM is the mixture-model API in older scikit-learn versions

m.means_
array([[ 199.04754288],
       [ 150.20537679]])
m.covars_ ** 0.5
array([[ 20.8382151 ],
       [  9.98902017]])

The GMM model corresponds well to the PyMC model.
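To make the comparison concrete, the posterior means from the MCMC traces can be put next to `m.means_` and `m.covars_ ** 0.5` (a small check reusing the traces extracted earlier):

```python
# Posterior means from the MCMC traces extracted above, for a side-by-side
# comparison with the scikit-learn GMM estimates.
print(center_trace.mean(axis=0))
print(std_trace.mean(axis=0))
```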

MCMC didn’t scale very well with more observed data; this was probably due to the low acceptance rate of the categorical variable. The model was also sensitive to the priors on the center variables, particularly the precision parameter.

A common formulation of the Brier score is:

$$BS = \frac{1}{N}\sum_{t=1}^{N}(f_t - o_t)^2$$

where $f_t$ is the forecast probability, $o_t$ the outcome (1 if the event happens, 0 otherwise) and $N$ is the number of forecasting instances. It can be thought of either as a measure of the “calibration” of a set of probabilistic predictions, or as a “cost function”.

As an example, suppose a forecaster always predicts the probability of rain as 80%.

forcast_prob = 0.8

Let’s compare the Brier scores for the forecaster when the actual probability of rain is 80% and 20%.

import numpy as np
actual_probs = np.array([0.8, 0.2])

def brierScore(preds, outcomes):
    # Mean squared difference between forecast probabilities and binary outcomes.
    n = float(len(preds))
    return 1 / n * np.sum((preds - outcomes)**2)

Simulate rainy days and compare Brier scores.

number_of_days = 100

def initArray(size, value):
    a = np.empty(size)
    a.fill(value)
    return a

prediction_probs = initArray(number_of_days, forcast_prob)
rainy_days = [ np.random.random_sample(number_of_days) < p for p in actual_probs]

brier_scores = [brierScore(prediction_probs, outcomes) for outcomes in rainy_days]
brier_scores
[0.13, 0.52600000000000025]

We can see that a Brier score closer to zero is better, with 0 being the best achievable and 1 the worst.
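As a quick sanity check of those end points, reusing the `brierScore` function above:

```python
# A fully confident, correct forecast scores 0; a fully confident, wrong forecast scores 1.
print(brierScore(np.array([1.0, 0.0]), np.array([1, 0])))   # 0.0
print(brierScore(np.array([1.0, 0.0]), np.array([0, 1])))   # 1.0
```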

Chebyshev’s inequality states that for a random variable $X$ with mean $\mu$ and variance $\sigma^2$,

$$P(|X - \mu| \geq k\sigma) \leq \frac{1}{k^2}.$$

This guarantees that, for any probability distribution, most values are ‘close’ to the mean: at most $\frac{1}{9}$ of the values are expected to lie $3$ or more standard deviations from the mean.

import numpy as np
import pandas as pd
size = 1000
data = [
    ['Gamma', np.random.gamma(1., 2., size)],
    ['Normal', np.random.normal(0, 2., size)],
    ['Exponential', np.random.exponential(0.9, size)]]
def countNumberFromCStdOfMean(values, c):
    # Count how many values lie at least c standard deviations from the mean.
    std = np.std(values)
    mean = np.mean(values)
    return np.sum(np.absolute(values - mean) >= c * std)
c = 3.0
results = [[ d[0] , np.mean(d[1]), np.std(d[1]), countNumberFromCStdOfMean(d[1], c)] for d in data]
df = pd.DataFrame(
    data = results)
df.columns = ['Distribution', 'Mean', 'Std', 'Number c stds from mean']
df
|   | Distribution | Mean | Std | 3 $\sigma$ from $\mu$ count |
|---|--------------|------|-----|-----------------------------|
| 0 | Gamma | 1.888626 | 1.801146 | 20 |
| 1 | Normal | -0.051050 | 2.027534 | 1 |
| 2 | Exponential | 0.930995 | 0.931055 | 16 |

Chebyshev’s inequality states that no more than about 111 data points (1000/9) are expected to lie 3 or more standard deviations from the mean when the sample size is 1000. If the probability distribution is known, much tighter bounds can be derived.
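The figure of 111 is just the Chebyshev bound $n / k^2$ evaluated directly:

```python
# Upper bound on the expected number of points at least k standard deviations
# from the mean, for a sample of size n.
n, k = 1000, 3.0
print(n / k**2)   # approximately 111.1
```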

This follows the analysis from this blog post; the accompanying notebook is here.

%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import scipy as sp
import pandas as pd
import sklearn
import seaborn as sns
from matplotlib import pyplot as plt
import sklearn.cross_validation

Load the wine data set, define a ‘good’ wine as one with a quality of 7 or above, and display the head of the data set.

wine_df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv", sep=";")
Y = wine_df.quality.values
wine_df = wine_df.drop('quality', axis = 1)
Y = np.asarray([1 if i >= 7 else 0 for i in Y])
wine_df.head()
|   | fixed acidity | volatile acidity | citric acid | residual sugar | chlorides | free sulfur dioxide | total sulfur dioxide | density | pH | sulphates | alcohol |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 7.4 | 0.70 | 0.00 | 1.9 | 0.076 | 11 | 34 | 0.9978 | 3.51 | 0.56 | 9.4 |
| 1 | 7.8 | 0.88 | 0.00 | 2.6 | 0.098 | 25 | 67 | 0.9968 | 3.20 | 0.68 | 9.8 |
| 2 | 7.8 | 0.76 | 0.04 | 2.3 | 0.092 | 15 | 54 | 0.9970 | 3.26 | 0.65 | 9.8 |
| 3 | 11.2 | 0.28 | 0.56 | 1.9 | 0.075 | 17 | 60 | 0.9980 | 3.16 | 0.58 | 9.8 |
| 4 | 7.4 | 0.70 | 0.00 | 1.9 | 0.076 | 11 | 34 | 0.9978 | 3.51 | 0.56 | 9.4 |
X = wine_df.as_matrix()
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score

Train random forests with different numbers of trees, using cross-validation to estimate the prediction accuracy. Below are the results using both prediction accuracy and F1 score.

scores = []
for val in range(1, 41):
    clf = RandomForestClassifier(n_estimators= val)
    validated = cross_val_score(clf, X, Y, cv = 10)
    scores.append(validated)
clf1 = RandomForestClassifier(n_estimators=2)
validated = cross_val_score(clf1, X, Y, cv = 10)
validated
array([ 0.8757764 ,  0.86956522,  0.81875   ,  0.85625   ,  0.89375   ,
        0.88125   ,  0.84375   ,  0.85534591,  0.83018868,  0.8427673 ])
# Percentage of wines labelled 'not good' (class 0); the classes are imbalanced.
len_y = len(Y)
temp = [i for i in Y if i == 0]
temp_1 = temp.count(0)

percentage = float(temp_1)/float(len_y) * 100

sns.boxplot(data=scores)
plt.xlabel('Number of trees')
plt.ylabel('Classification scores')
plt.title('Classification score for number of trees')
plt.show()

[Figure: box plots of classification accuracy by number of trees]

scores = []
for val in range(1, 41):
    clf = RandomForestClassifier(n_estimators= val)
    validated = cross_val_score(clf, X, Y, cv = 10, scoring = "f1")
    scores.append(validated)

sns.boxplot(data=scores)
plt.xlabel('Number of trees')
plt.ylabel('F1 score')
plt.title('F1 scores as a function of number of trees')
plt.show()

[Figure: box plots of F1 score by number of trees]

Typically, class membership is assigned when the predicted probability is above one half.

clf = RandomForestClassifier(n_estimators= 15)
clf.fit(X, Y)
(clf.predict_proba(X)[:, 1] > 0.5).astype(int)
array([0, 0, 0, ..., 0, 0, 0])

Determine a good cutoff point on the predicted probability for class membership.

def cutoff_predict(clf, X, cutoff):
    return (clf.predict_proba(X)[:, 1]> cutoff).astype(int)

def custom_f1(cutoff):
    # Returns a scorer with the (estimator, X, y) signature accepted by cross_val_score.
    def f1_cutoff(clf, X, y):
        ypred = cutoff_predict(clf, X, cutoff)
        return sklearn.metrics.f1_score(y, ypred)

    return f1_cutoff
scores = []

for cutoff in np.arange(0.1, 0.9, 0.1):
    clf = RandomForestClassifier(n_estimators= 15)
    validated = cross_val_score(clf, X, Y, cv = 10, scoring = custom_f1(cutoff))
    scores.append(validated)
sns.boxplot(y=scores, x = np.arange(0.1, 0.9, 0.1))
plt.title('F1 scores for each cutoff value')
plt.xlabel('Cutoff value')
plt.ylabel('Custom F1 score')
plt.show()

[Figure: box plots of F1 score by probability cutoff]

A cutoff of about 0.3–0.5 appears to give the best predictive performance. A cutoff below 0.5 makes sense because the training set is imbalanced.
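The imbalance is easy to confirm from the labels (reusing `Y` from above):

```python
# Fraction of 'good' wines (class 1); the positive class is the minority,
# so lowering the cutoff trades some precision for recall.
print(np.mean(Y))
```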

clf = RandomForestClassifier(n_estimators= 15)
clf.fit(X, Y)

imp = clf.feature_importances_
names = wine_df.columns

# Keep the unsorted `imp` intact (it is indexed against the original columns later);
# sort copies for plotting only.
imp_sorted, names_sorted = zip(*sorted(zip(imp, names)))

plt.barh(range(len(names_sorted)), imp_sorted, align = 'center')
plt.yticks(range(len(names_sorted)), names_sorted)

plt.xlabel('Importance of features')
plt.ylabel('Features')
plt.title('Importance of each feature')
plt.show()

[Figure: horizontal bar chart of feature importances]

Importance in a random forest can be assessed by randomly permuting a feature’s values in the data set and measuring how much the accuracy drops; the feature whose permutation causes the largest drop is the most important.
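As a rough sketch of that idea (illustrative only, and not necessarily how scikit-learn computes `feature_importances_` internally):

```python
def permutation_importance(clf, X, y):
    # Accuracy drop when each feature is permuted, breaking its link to the target.
    baseline = clf.score(X, y)
    drops = []
    for j in range(X.shape[1]):
        X_perm = X.copy()
        X_perm[:, j] = np.random.permutation(X_perm[:, j])
        drops.append(baseline - clf.score(X_perm, y))
    return np.array(drops)
```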

from sklearn.tree import DecisionTreeClassifier
import sklearn.linear_model
import sklearn.svm

def plot_decision_surface(clf, X_train, Y_train):
    plot_step = 0.1
    if X_train.shape[1] != 2:
        raise ValueError("X_train should have exactly 2 columns!")

    x_min, x_max = X_train[:, 0].min() - plot_step, X_train[:, 0].max() + plot_step
    y_min, y_max = X_train[:, 1].min() - plot_step, X_train[:, 1].max() + plot_step
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                        np.arange(y_min, y_max, plot_step))

    clf.fit(X_train, Y_train)
    if hasattr(clf, 'predict_proba'):
        Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
    else:
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap = plt.cm.Reds)
    plt.scatter(X_train[:,0], X_train[:, 1], c = Y_train, cmap = plt.cm.Paired)
    plt.show()

Plot the decision boundary using the two most important features and compare across algorithms.

imp_fe = np.argsort(imp)[::-1][0:2]
X_imp = X[:, imp_fe]

algorithms = [DecisionTreeClassifier(), RandomForestClassifier(), sklearn.svm.SVC(C = 100.0, gamma = 1)]
title = ['Decision Tree', 'Random Forest', 'SVM']

for i in xrange(3):
    plt.title(title[i])
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plot_decision_surface(algorithms[i], X_imp, Y)

[Figures: decision surfaces for the Decision Tree, Random Forest, and SVM]

The decision surfaces for the decision tree and the random forest are complex. The random forest is less sensitive, with isolated points having less extreme classification probabilities. The SVM has a smooth decision boundary.

svm = [sklearn.svm.SVC(C = 1.0, gamma = 1.0, class_weight = None),
      sklearn.svm.SVC(C = 1.0, gamma = 1.0, class_weight = 'auto')]
title = ['Svm without class weight', 'Svm with class weight']

for i in xrange(2):
    plt.title(title[i])
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')

    plot_decision_surface(svm[i], X_imp, Y)

[Figures: SVM decision surfaces without and with class weighting]

The first SVM, with equal class weights, classifies only a small subset of the positive training points correctly. Because it produces very few false positives, it has higher overall precision but lower recall than the second SVM.
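One way to quantify that trade-off is to cross-validate precision and recall for both SVMs (a sketch reusing `svm`, `title`, `X_imp` and `Y` from above, with the same old-style `cross_val_score` API used throughout this post):

```python
# Cross-validated precision and recall for the unweighted and class-weighted SVMs.
for name, clf in zip(title, svm):
    prec = cross_val_score(clf, X_imp, Y, cv=5, scoring="precision").mean()
    rec = cross_val_score(clf, X_imp, Y, cv=5, scoring="recall").mean()
    print("%s: precision=%.2f, recall=%.2f" % (name, prec, rec))
```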