From 2596c2c9c405194889918d90d4c4faaa8464e754 Mon Sep 17 00:00:00 2001
From: unknown
Date: Wed, 5 May 2021 22:38:55 +0800
Subject: [PATCH 01/10] readme first draft

---
 numpy_ml/naive_bayes/readme.md | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 numpy_ml/naive_bayes/readme.md

diff --git a/numpy_ml/naive_bayes/readme.md b/numpy_ml/naive_bayes/readme.md
new file mode 100644
index 0000000..78d3f87
--- /dev/null
+++ b/numpy_ml/naive_bayes/readme.md
@@ -0,0 +1,12 @@
+# Naive Bayes
+The `naive_bayes.py` module implements:
+
+1. [Gaussian Naive Bayes]
+
+2. [Multinomial Naive Bayes]
+
+3. [Categorical Naive Bayes]
+
+
+Reference:
+H. Zhang (2004). The optimality of Naive Bayes. Proc. FLAIRS.
\ No newline at end of file

From b92e8f9c9199e8fa3fe1ff9d55c472c16345c38e Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 15 May 2021 20:51:10 +0800
Subject: [PATCH 02/10] added gaussianNB

---
 numpy_ml/naive_bayes/naive_bayes.py | 128 ++++++++++++++++++++++++++++
 1 file changed, 128 insertions(+)
 create mode 100644 numpy_ml/naive_bayes/naive_bayes.py

diff --git a/numpy_ml/naive_bayes/naive_bayes.py b/numpy_ml/naive_bayes/naive_bayes.py
new file mode 100644
index 0000000..aa9c8c3
--- /dev/null
+++ b/numpy_ml/naive_bayes/naive_bayes.py
@@ -0,0 +1,128 @@
+import numpy as np
+
+class GaussianNB():
+    """
+    Gaussian Naive Bayes
+
+    Assumes each class-conditional feature distribution is
+    independent and estimates the mean and variance of each
+    feature from the training data
+
+    Parameters
+    ----------
+    eps: float
+        a value added to the variance to prevent numerical error
+
+    Attributes
+    ----------
+    labels : ndarray of shape (n_classes,)
+        unique class labels in the training sample
+
+    mean: ndarray of shape (n_classes, n_features)
+        per-class mean of each feature
+
+    sigma: ndarray of shape (n_classes, n_features)
+        per-class variance of each feature
+
+    prior : ndarray of shape (n_classes,)
+        probability of each class
+
+    """
+    def __init__(self,eps=1e-6):
+        self.eps = eps
+
+    def fit(self,X,y):
+        """
+        Train the model with X,y
+
+        Parameters
+        ----------
+        X: ndarray of shape (n_samples, n_features)
+            Input data
+        y: ndarray of shape (n_samples,)
+            Target
+
+        returns
+        --------
+        self: object
+        """
+
+        self.n_sample, self.n_features = X.shape
+        self.labels = np.unique(y)
+        self.n_classes = len(self.labels)
+
+        self.mean = np.zeros((self.n_classes,self.n_features))
+        self.sigma = np.zeros((self.n_classes,self.n_features))
+        self.prior = np.zeros((self.n_classes,))
+
+        for i in range(self.n_classes):
+            X_c = X[y==i,:]
+
+            self.mean[i,:] = np.mean(X_c,axis=0)
+            self.sigma[i,:] = np.var(X_c,axis=0) + self.eps
+            self.prior[i] = X_c.shape[0]/self.n_sample
+
+        return self
+
+    def predict(self,X):
+        """
+        use the trained model to generate predictions
+
+        Parameters
+        ---------
+        X: ndarray of shape (n_samples, n_features)
+            Input data
+
+        returns
+        -------
+        probs : ndarray of shape (n_samples, n_classes)
+            The model predictions for each item in X to be in each class
+        """
+
+        probs = np.zeros((X.shape[0],self.n_classes))
+        for i in range(self.n_classes):
+            probs[:,i] = self.prob(X,self.mean[i,:],self.sigma[i,:],self.prior[i])
+
+
+        return probs
+
+    def prob(self,X,mean,sigma,prior):
+        """
+        compute the joint log likelihood of the data under a Gaussian distribution
+
+        X: ndarray of shape (n_samples, n_features)
+            Input data
+
+        mean: ndarray of shape (n_features,)
+            mean of each feature for the current class
+
+        sigma: ndarray of shape (n_features,)
+            variance of each feature for the current class
+
+        prior : float
+            prior probability of the current class
+
+        returns
+        -------
+        joint_log_likelihood : ndarray of shape (n_samples,)
+            joint log likelihood of data
+
+        """
+
+        prob = -self.n_features / 2 * np.log(2 * np.pi) - 0.5 * np.sum(
+            np.log(sigma )
+        )
+        prob -= 0.5 * np.sum(np.power(X -mean, 2) / (sigma), 1)
+
+        joint_log_likelihood = prior + prob
+        return joint_log_likelihood
+
+
+
+
+
+
+
+
+

From d3a1c514736f59ce52ea40d1a043449686410e69 Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 15 May 2021 20:59:53 +0800
Subject: [PATCH 03/10] added __init__ file

---
 numpy_ml/naive_bayes/__init__.py | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 numpy_ml/naive_bayes/__init__.py

diff --git a/numpy_ml/naive_bayes/__init__.py b/numpy_ml/naive_bayes/__init__.py
new file mode 100644
index 0000000..c614360
--- /dev/null
+++ b/numpy_ml/naive_bayes/__init__.py
@@ -0,0 +1 @@
+from .naive_bayes import *

From a4eff01db253fa2ff07fd087ead562f72f1f5fdc Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 15 May 2021 21:34:52 +0800
Subject: [PATCH 04/10] unit test for gaussianNB

---
 numpy_ml/tests/test_naive_bayes.py | 36 ++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 numpy_ml/tests/test_naive_bayes.py

diff --git a/numpy_ml/tests/test_naive_bayes.py b/numpy_ml/tests/test_naive_bayes.py
new file mode 100644
index 0000000..53882c3
--- /dev/null
+++ b/numpy_ml/tests/test_naive_bayes.py
@@ -0,0 +1,36 @@
+import numpy as np
+from sklearn import datasets
+from sklearn.model_selection import train_test_split
+
+from sklearn import naive_bayes
+
+from numpy_ml.naive_bayes.naive_bayes import GaussianNB
+
+def test_GaussianNB():
+    iris = datasets.load_iris()
+    X = iris.data
+    y = iris.target
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
+
+    NB = GaussianNB()
+    NB.fit(X_train, y_train)
+
+    probs=NB.predict(X_test)
+    pred = np.argmax(probs, 1)
+    accuracy = sum(pred==y_test)/X_test.shape[0]
+
+    sklearn_NB = naive_bayes.GaussianNB()
+    sklearn_NB.fit(X_train, y_train)
+
+    sk_pred=sklearn_NB.predict(X_test)
+    sk_accuracy = sum(sk_pred==y_test)/X_test.shape[0]
+
+
+    try:
+        np.testing.assert_almost_equal(accuracy, sk_accuracy)
+        print("\nAccuracies are equal")
+    except AssertionError as e:
+        print("\nAccuracies are not equal:\n{}".format(e))
+
+
+

From bae8921c7aecc7f23d608c044664a7e4aec8e3eb Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 15 May 2021 21:35:34 +0800
Subject: [PATCH 05/10] update readme

---
 numpy_ml/naive_bayes/readme.md | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/numpy_ml/naive_bayes/readme.md b/numpy_ml/naive_bayes/readme.md
index 78d3f87..c9243f8 100644
--- a/numpy_ml/naive_bayes/readme.md
+++ b/numpy_ml/naive_bayes/readme.md
@@ -3,10 +3,7 @@ The `naive_bayes.py` module implements:
 
 1. [Gaussian Naive Bayes]
 
-2. [Multinomial Naive Bayes]
-
-3. [Categorical Naive Bayes]
-
 
 Reference:
-H. Zhang (2004). The optimality of Naive Bayes. Proc. FLAIRS.
\ No newline at end of file
+H. Zhang (2004). The optimality of Naive Bayes. Proc. FLAIRS.

From f34aefc9b33133e431baee6061efbb44f06d6762 Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 15 May 2021 21:36:39 +0800
Subject: [PATCH 06/10] update readme

---
 numpy_ml/naive_bayes/readme.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy_ml/naive_bayes/readme.md b/numpy_ml/naive_bayes/readme.md
index c9243f8..0ce91d0 100644
--- a/numpy_ml/naive_bayes/readme.md
+++ b/numpy_ml/naive_bayes/readme.md
@@ -1,7 +1,7 @@
 # Naive Bayes
 The `naive_bayes.py` module implements:
 
-1. [Gaussian Naive Bayes]
+1. [Gaussian Naive Bayes]: computes the joint log likelihood of the data under a per-class Gaussian distribution
 
 
 

From eaa1267fd4b24ea818e794391f5e76ef30ac8037 Mon Sep 17 00:00:00 2001
From: ddbourgin
Date: Sat, 29 May 2021 17:30:42 -0400
Subject: [PATCH 07/10] Move naive bayes classifier under linear models

---
 numpy_ml/linear_models/__init__.py                     | 1 +
 numpy_ml/{naive_bayes => linear_models}/naive_bayes.py | 0
 numpy_ml/naive_bayes/__init__.py                       | 1 -
 numpy_ml/naive_bayes/readme.md                         | 9 ---------
 4 files changed, 1 insertion(+), 10 deletions(-)
 rename numpy_ml/{naive_bayes => linear_models}/naive_bayes.py (100%)
 delete mode 100644 numpy_ml/naive_bayes/__init__.py
 delete mode 100644 numpy_ml/naive_bayes/readme.md

diff --git a/numpy_ml/linear_models/__init__.py b/numpy_ml/linear_models/__init__.py
index b29e90b..a0a762a 100644
--- a/numpy_ml/linear_models/__init__.py
+++ b/numpy_ml/linear_models/__init__.py
@@ -1 +1,2 @@
 from .lm import *
+from .naive_bayes import *
diff --git a/numpy_ml/naive_bayes/naive_bayes.py b/numpy_ml/linear_models/naive_bayes.py
similarity index 100%
rename from numpy_ml/naive_bayes/naive_bayes.py
rename to numpy_ml/linear_models/naive_bayes.py
diff --git a/numpy_ml/naive_bayes/__init__.py b/numpy_ml/naive_bayes/__init__.py
deleted file mode 100644
index c614360..0000000
--- a/numpy_ml/naive_bayes/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .naive_bayes import *
diff --git a/numpy_ml/naive_bayes/readme.md b/numpy_ml/naive_bayes/readme.md
deleted file mode 100644
index 0ce91d0..0000000
--- a/numpy_ml/naive_bayes/readme.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Naive Bayes
-The `naive_bayes.py` module implements:
-
-1. [Gaussian Naive Bayes]: computes the joint log likelihood of the data under a per-class Gaussian distribution
-
-
-
-Reference:
-H. Zhang (2004). The optimality of Naive Bayes. Proc. FLAIRS.
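With PATCHES 01-07 applied, the classifier is importable from `numpy_ml.linear_models`. The following is a minimal usage sketch of the `GaussianNB` class as it stands at this point in the series, mirroring the iris unit test from PATCH 04; it assumes scikit-learn is installed, and note that `predict` here returns per-class joint log likelihoods rather than hard labels:

    import numpy as np
    from sklearn import datasets
    from sklearn.model_selection import train_test_split

    from numpy_ml.linear_models import GaussianNB

    # load iris and hold out half of the data for evaluation
    X, y = datasets.load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0
    )

    NB = GaussianNB().fit(X_train, y_train)

    # predict() returns an (n_samples, n_classes) array of per-class joint
    # log likelihoods; the predicted label is the argmax over classes
    probs = NB.predict(X_test)
    pred = np.argmax(probs, axis=1)
    print("accuracy: {:.3f}".format(np.mean(pred == y_test)))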
From 858927514bde31cf2da0f0d646cfe986b35432b5 Mon Sep 17 00:00:00 2001
From: ddbourgin
Date: Sun, 30 May 2021 13:23:12 -0400
Subject: [PATCH 08/10] Add more stringent tests for GaussianNBClassifier

---
 numpy_ml/tests/test_naive_bayes.py | 76 ++++++++++++++++++++++--------
 1 file changed, 56 insertions(+), 20 deletions(-)

diff --git a/numpy_ml/tests/test_naive_bayes.py b/numpy_ml/tests/test_naive_bayes.py
index 53882c3..34780d5 100644
--- a/numpy_ml/tests/test_naive_bayes.py
+++ b/numpy_ml/tests/test_naive_bayes.py
@@ -4,33 +4,69 @@
 
 from sklearn import naive_bayes
 
-from numpy_ml.naive_bayes.naive_bayes import GaussianNB
+from numpy_ml.linear_models import GaussianNBClassifier
+from numpy_ml.utils.testing import random_tensor
 
-def test_GaussianNB():
-    iris = datasets.load_iris()
-    X = iris.data
-    y = iris.target
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
 
-    NB = GaussianNB()
-    NB.fit(X_train, y_train)
+def test_GaussianNB(N=10):
+    np.random.seed(12345)
+    N = np.inf if N is None else N
 
-    probs=NB.predict(X_test)
-    pred = np.argmax(probs, 1)
-    accuracy = sum(pred==y_test)/X_test.shape[0]
+    i = 1
+    while i < N + 1:
+        n_ex = np.random.randint(1, 300)
+        n_feats = np.random.randint(1, 100)
+        n_classes = np.random.randint(2, 10)
 
-    sklearn_NB = naive_bayes.GaussianNB()
-    sklearn_NB.fit(X_train, y_train)
+        X = random_tensor((n_ex, n_feats), standardize=True)
+        y = np.random.randint(0, n_classes, size=n_ex)
 
-    sk_pred=sklearn_NB.predict(X_test)
-    sk_accuracy = sum(sk_pred==y_test)/X_test.shape[0]
+        X_test = random_tensor((n_ex, n_feats), standardize=True)
 
+        NB = GaussianNBClassifier(eps=1e-09)
+        NB.fit(X, y)
 
-    try:
-        np.testing.assert_almost_equal(accuracy, sk_accuracy)
-        print("\nAccuracies are equal")
-    except AssertionError as e:
-        print("\nAccuracies are not equal:\n{}".format(e))
+        preds = NB.predict(X_test)
 
+        sklearn_NB = naive_bayes.GaussianNB()
+        sklearn_NB.fit(X, y)
 
+        sk_preds = sklearn_NB.predict(X_test)
 
+        # use a separate index (`j`) for the per-class checks so the outer
+        # while-loop counter `i` is not clobbered
+        for j in range(len(NB.labels)):
+            P = NB.parameters
+            jointi = np.log(sklearn_NB.class_prior_[j])
+            jointi_mine = np.log(P["prior"][j])
+
+            np.testing.assert_almost_equal(jointi, jointi_mine)
+
+            n_ij = -0.5 * np.sum(np.log(2.0 * np.pi * sklearn_NB.sigma_[j, :]))
+            n_ij_mine = -0.5 * np.sum(np.log(2.0 * np.pi * P["sigma"][j]))
+
+            np.testing.assert_almost_equal(n_ij_mine, n_ij)
+
+            n_ij2 = n_ij - 0.5 * np.sum(
+                ((X_test - sklearn_NB.theta_[j, :]) ** 2) / (sklearn_NB.sigma_[j, :]), 1
+            )
+
+            n_ij2_mine = n_ij_mine - 0.5 * np.sum(
+                ((X_test - P["mean"][j]) ** 2) / (P["sigma"][j]), 1
+            )
+            np.testing.assert_almost_equal(n_ij2_mine, n_ij2, decimal=4)
+
+            llh = jointi + n_ij2
+            llh_mine = jointi_mine + n_ij2_mine
+
+            np.testing.assert_almost_equal(llh_mine, llh, decimal=4)
+
+        np.testing.assert_almost_equal(P["prior"], sklearn_NB.class_prior_)
+        np.testing.assert_almost_equal(P["mean"], sklearn_NB.theta_)
+        np.testing.assert_almost_equal(P["sigma"], sklearn_NB.sigma_)
+        np.testing.assert_almost_equal(
+            sklearn_NB._joint_log_likelihood(X_test),
+            NB._log_posterior(X_test),
+            decimal=4,
+        )
+        np.testing.assert_almost_equal(preds, sk_preds)
+        print("PASSED")
+        i += 1

From 0f6a15685155a97bdcb561b919405d52282691ca Mon Sep 17 00:00:00 2001
From: ddbourgin
Date: Sun, 30 May 2021 13:26:37 -0400
Subject: [PATCH 09/10] Overhaul GaussianNBClassifier: fix log posterior calc,
 fix attribute names + descriptions, expand documentation

---
 numpy_ml/linear_models/naive_bayes.py | 271 +++++++++++++++++---------
 1 file changed, 177 insertions(+), 94 deletions(-)

diff --git a/numpy_ml/linear_models/naive_bayes.py b/numpy_ml/linear_models/naive_bayes.py
index aa9c8c3..d174fd1 100644
--- a/numpy_ml/linear_models/naive_bayes.py
+++ b/numpy_ml/linear_models/naive_bayes.py
@@ -1,128 +1,211 @@
-import numpy as np
-
-class GaussianNB():
-    """
-    Gaussian Naive Bayes
-
-    Assumes each class-conditional feature distribution is
-    independent and estimates the mean and variance of each
-    feature from the training data
-
-    Parameters
-    ----------
-    eps: float
-        a value added to the variance to prevent numerical error
-
-    Attributes
-    ----------
-    labels : ndarray of shape (n_classes,)
-        unique class labels in the training sample
-
-    mean: ndarray of shape (n_classes, n_features)
-        per-class mean of each feature
-
-    sigma: ndarray of shape (n_classes, n_features)
-        per-class variance of each feature
-
-    prior : ndarray of shape (n_classes,)
-        probability of each class
-
-    """
-    def __init__(self,eps=1e-6):
-        self.eps = eps
-
-    def fit(self,X,y):
+import numpy as np
+
+
+class GaussianNBClassifier:
+    def __init__(self, eps=1e-6):
+        r"""
+        A naive Bayes classifier for real-valued data.
+
+        Notes
+        -----
+        The naive Bayes model assumes the features of each training example
+        :math:`\mathbf{x}` are mutually independent given the example label
+        :math:`y`:
+
+        .. math::
+
+            P(\mathbf{x}_i \mid y_i) = \prod_{j=1}^M P(x_{i,j} \mid y_i)
+
+        where :math:`M` is the dimension of the `i`th example
+        :math:`\mathbf{x}_i` and :math:`y_i` is the label associated with the
+        `i`th example.
+
+        Combining the conditional independence assumption with a simple
+        application of Bayes' theorem gives the naive Bayes classification
+        rule:
+
+        .. math::
+
+            \hat{y} &= \arg \max_y P(y \mid \mathbf{x}) \\
+                    &= \arg \max_y P(y) P(\mathbf{x} \mid y) \\
+                    &= \arg \max_y P(y) \prod_{j=1}^M P(x_j \mid y)
+
+        In the final expression, the prior class probability :math:`P(y)` can
+        be specified in advance or estimated empirically from the training
+        data.
+
+        In the Gaussian version of the naive Bayes model, the feature
+        likelihood is assumed to be normally distributed for each class:
+
+        .. math::
+
+            \mathbf{x}_i \mid y_i = c, \theta \sim \mathcal{N}(\mu_c, \Sigma_c)
+
+        where :math:`\theta` is the set of model parameters: :math:`\{\mu_1,
+        \Sigma_1, \ldots, \mu_K, \Sigma_K\}`, :math:`K` is the total number of
+        unique classes present in the data, and the parameters for the Gaussian
+        associated with class :math:`c`, :math:`\mu_c` and :math:`\Sigma_c`
+        (where :math:`1 \leq c \leq K`), are estimated via MLE from the set of
+        training examples with label :math:`c`.
+
+        Parameters
+        ----------
+        eps : float
+            A value added to the variance to prevent numerical error. Default
+            is 1e-6.
+
+        Attributes
+        ----------
+        parameters : dict
+            Dictionary of model parameters: "mean", the `(K, M)` array of
+            feature means under each class, "sigma", the `(K, M)` array of
+            feature variances under each class, and "prior", the `(K,)` array of
+            empirical prior probabilities for each class label.
+        hyperparameters : dict
+            Dictionary of model hyperparameters
+        labels : :py:class:`ndarray <numpy.ndarray>` of shape `(K,)`
+            An array containing the unique class labels for the training
+            examples.
+        """
+        self.labels = None
+        self.hyperparameters = {"eps": eps}
+        self.parameters = {
+            "mean": None,  # shape: (K, M)
+            "sigma": None,  # shape: (K, M)
+            "prior": None,  # shape: (K,)
+        }
+
+    def fit(self, X, y):
         """
-        Train the model with X,y
+        Fit the model parameters via maximum likelihood.
+
+        Notes
+        -----
+        The model parameters are stored in the :py:attr:`parameters` attribute.
+        The following keys are present:
+
+            mean : :py:class:`ndarray <numpy.ndarray>` of shape `(K, M)`
+                Feature means for each of the `K` label classes
+            sigma : :py:class:`ndarray <numpy.ndarray>` of shape `(K, M)`
+                Feature variances for each of the `K` label classes
+            prior : :py:class:`ndarray <numpy.ndarray>` of shape `(K,)`
+                Prior probability of each of the `K` label classes, estimated
+                empirically from the training data
 
         Parameters
         ----------
-        X: ndarray of shape (n_samples, n_features)
-            Input data
-        y: ndarray of shape (n_samples,)
-            Target
-
-        returns
-        --------
+        X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)`
+            A dataset consisting of `N` examples, each of dimension `M`
+        y : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
+            The class label for each of the `N` examples in `X`
+
+        Returns
+        -------
         self: object
         """
-
-        self.n_sample, self.n_features = X.shape
+        P = self.parameters
+        H = self.hyperparameters
+
         self.labels = np.unique(y)
-        self.n_classes = len(self.labels)
 
-        self.mean = np.zeros((self.n_classes,self.n_features))
-        self.sigma = np.zeros((self.n_classes,self.n_features))
-        self.prior = np.zeros((self.n_classes,))
+        K = len(self.labels)
+        N, M = X.shape
 
-        for i in range(self.n_classes):
-            X_c = X[y==i,:]
+        P["mean"] = np.zeros((K, M))
+        P["sigma"] = np.zeros((K, M))
+        P["prior"] = np.zeros((K,))
 
-            self.mean[i,:] = np.mean(X_c,axis=0)
-            self.sigma[i,:] = np.var(X_c,axis=0) + self.eps
-            self.prior[i] = X_c.shape[0]/self.n_sample
+        for i, c in enumerate(self.labels):
+            X_c = X[y == c, :]
 
+            P["mean"][i, :] = np.mean(X_c, axis=0)
+            P["sigma"][i, :] = np.var(X_c, axis=0) + H["eps"]
+            P["prior"][i] = X_c.shape[0] / N
         return self
 
-    def predict(self,X):
+    def predict(self, X):
         """
-        use the trained model to generate predictions
+        Use the trained classifier to predict the class label for each example
+        in **X**.
 
         Parameters
-        ---------
-        X: ndarray of shape (n_samples, n_features)
-            Input data
+        ----------
+        X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)`
+            A dataset of `N` examples, each of dimension `M`
 
-        returns
+        Returns
         -------
-        probs : ndarray of shape (n_samples, n_classes)
-            The model predictions for each item in X to be in each class
+        labels : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
+            The predicted class labels for each example in `X`
         """
+        return self.labels[self._log_posterior(X).argmax(axis=1)]
 
-        probs = np.zeros((X.shape[0],self.n_classes))
-        for i in range(self.n_classes):
-            probs[:,i] = self.prob(X,self.mean[i,:],self.sigma[i,:],self.prior[i])
-
+    def _log_posterior(self, X):
+        r"""
+        Compute the (unnormalized) log posterior for each class.
-        return probs
-
-    def prob(self,X,mean,sigma,prior):
-        """
-        compute the joint log likelihood of the data under a Gaussian distribution
-
-        X: ndarray of shape (n_samples, n_features)
-            Input data
-
-        mean: ndarray of shape (n_features,)
-            mean of each feature for the current class
-
-        sigma: ndarray of shape (n_features,)
-            variance of each feature for the current class
-
-        prior : float
-            prior probability of the current class
+        Parameters
+        ----------
+        X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)`
+            A dataset of `N` examples, each of dimension `M`
 
-        returns
+        Returns
         -------
-        joint_log_likelihood : ndarray of shape (n_samples,)
-            joint log likelihood of data
-
+        log_posterior : :py:class:`ndarray <numpy.ndarray>` of shape `(N, K)`
+            Unnormalized log posterior probability of each class for each
+            example in `X`
         """
+        K = len(self.labels)
+        log_posterior = np.zeros((X.shape[0], K))
+        for i in range(K):
+            log_posterior[:, i] = self._log_class_posterior(X, i)
+        return log_posterior
 
-        prob = -self.n_features / 2 * np.log(2 * np.pi) - 0.5 * np.sum(
-            np.log(sigma )
-        )
-        prob -= 0.5 * np.sum(np.power(X -mean, 2) / (sigma), 1)
-
-        joint_log_likelihood = prior + prob
-        return joint_log_likelihood
+    def _log_class_posterior(self, X, class_idx):
+        r"""
+        Compute the (unnormalized) log posterior for the label at index
+        `class_idx` in :py:attr:`labels`.
+
+        Notes
+        -----
+        The unnormalized log posterior for example :math:`\mathbf{x}_i` and
+        class :math:`c` is:
+
+        .. math::
+
+            \log P(y_i = c \mid \mathbf{x}_i, \theta)
+                &\propto \log P(y=c \mid \theta) +
+                    \log P(\mathbf{x}_i \mid y_i = c, \theta) \\
+                &\propto \log P(y=c \mid \theta) +
+                    \sum_{j=1}^M \log P(x_j \mid y_i = c, \theta)
+
+        In the Gaussian naive Bayes model, the feature likelihood for class
+        :math:`c`, :math:`P(\mathbf{x}_i \mid y_i = c, \theta)`, is assumed to
+        be normally distributed:
+
+        .. math::
+
+            \mathbf{x}_i \mid y_i = c, \theta \sim \mathcal{N}(\mu_c, \Sigma_c)
+
+        Parameters
+        ----------
+        X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)`
+            A dataset of `N` examples, each of dimension `M`
+        class_idx : int
+            The index of the current class in :py:attr:`labels`
+
+        Returns
+        -------
+        log_class_posterior : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
+            Unnormalized log probability of the label at index `class_idx`
+            in :py:attr:`labels` for each example in `X`
+        """
+        P = self.parameters
+        mu = P["mean"][class_idx]
+        prior = P["prior"][class_idx]
+        sigsq = P["sigma"][class_idx]
+
+        # log likelihood = log X | N(mu, sigsq)
+        log_likelihood = -0.5 * np.sum(np.log(2 * np.pi * sigsq))
+        log_likelihood -= 0.5 * np.sum(((X - mu) ** 2) / sigsq, axis=1)
+        return log_likelihood + np.log(prior)

From 126602d98d028dff4ca17e2d0cad3b53a92b5a56 Mon Sep 17 00:00:00 2001
From: ddbourgin
Date: Sun, 30 May 2021 13:28:17 -0400
Subject: [PATCH 10/10] Update README for GaussianNBClassifier

---
 numpy_ml/linear_models/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/numpy_ml/linear_models/README.md b/numpy_ml/linear_models/README.md
index 743a57c..53ac166 100644
--- a/numpy_ml/linear_models/README.md
+++ b/numpy_ml/linear_models/README.md
@@ -8,6 +8,7 @@ The `lm.py` module implements:
 3. [Bayesian linear regression](https://en.wikipedia.org/wiki/Bayesian_linear_regression) with maximum a posteriori parameter estimates via [conjugacy](https://en.wikipedia.org/wiki/Conjugate_prior#Table_of_conjugate_distributions)
     - Known coefficient prior mean and known error variance
     - Known coefficient prior mean and unknown error variance
+4. [Naive Bayes classifier](https://en.wikipedia.org/wiki/Naive_Bayes_classifier) with Gaussian feature likelihoods.
 
 ## Plots
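The final `GaussianNBClassifier` can be exercised end to end in the same way PATCH 08's test does. A short sketch, in which the random data, shapes, and tolerance are illustrative assumptions rather than part of the test suite:

    import numpy as np
    from sklearn import naive_bayes

    from numpy_ml.linear_models import GaussianNBClassifier

    rng = np.random.RandomState(12345)
    X = rng.randn(200, 5)            # 200 examples, 5 real-valued features
    y = rng.randint(0, 3, size=200)  # 3 class labels
    X_test = rng.randn(50, 5)

    NB = GaussianNBClassifier(eps=1e-09).fit(X, y)
    preds = NB.predict(X_test)  # hard class labels, shape (50,)

    # the unnormalized log posteriors should agree with sklearn's joint log
    # likelihoods, and the hard predictions should match
    sk_NB = naive_bayes.GaussianNB().fit(X, y)
    np.testing.assert_almost_equal(
        NB._log_posterior(X_test), sk_NB._joint_log_likelihood(X_test), decimal=4
    )
    np.testing.assert_almost_equal(preds, sk_NB.predict(X_test))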
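Note that `_log_posterior` returns *unnormalized* log posteriors. If properly normalized class probabilities are needed, they can be recovered with a standard log-sum-exp normalization. A sketch of one way to do this; the `posterior_probs` helper is hypothetical and not part of the module:

    import numpy as np

    def posterior_probs(log_posterior):
        """Normalize an (N, K) array of unnormalized log posteriors so that
        each row forms a proper probability distribution over the K classes."""
        # subtract the per-row max before exponentiating for numerical stability
        shifted = log_posterior - log_posterior.max(axis=1, keepdims=True)
        expd = np.exp(shifted)
        return expd / expd.sum(axis=1, keepdims=True)

    # e.g., probs = posterior_probs(NB._log_posterior(X_test))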