diff --git a/Machine_Learning_Algorithms/Markov Chain Monte Carlo (MCMC) Algorithm/Program.c b/Machine_Learning_Algorithms/Markov Chain Monte Carlo (MCMC) Algorithm/Program.c new file mode 100644 index 00000000..160bef0f --- /dev/null +++ b/Machine_Learning_Algorithms/Markov Chain Monte Carlo (MCMC) Algorithm/Program.c @@ -0,0 +1,66 @@ +#include +#include +#include +#include + +// Define M_PI if not available in the math library +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +// Function to calculate the probability density of the target distribution (normal distribution) +double target_distribution(double x) { + double mean = 0.0; + double std_dev = 1.0; + double exponent = - (x - mean) * (x - mean) / (2 * std_dev * std_dev); + return exp(exponent) / (std_dev * sqrt(2 * M_PI)); +} + +// Function to propose a new sample, adds a random value within a step range around the current sample +double proposal(double x) { + double step_size = 0.5; // Step size for proposal distribution + return x + ((rand() / (double)RAND_MAX) - 0.5) * step_size * 2.0; +} + +// Metropolis-Hastings algorithm for MCMC +void metropolis_hastings(int samples, double *chain) { + chain[0] = 0.0; // Initial starting point + + for (int i = 1; i < samples; i++) { + double current = chain[i - 1]; + double proposed = proposal(current); + + // Calculate acceptance ratio + double acceptance_ratio = target_distribution(proposed) / target_distribution(current); + + // Accept or reject the proposal based on the acceptance ratio + if (acceptance_ratio >= 1.0 || ((double)rand() / RAND_MAX) < acceptance_ratio) { + chain[i] = proposed; // Accept proposal + } else { + chain[i] = current; // Reject proposal, stay at current sample + } + } +} + +// Main function to initialize, run MCMC, and print samples +int main() { + srand(time(NULL)); // Seed for random number generation + + int samples = 10000; // Number of samples + double *chain = malloc(samples * sizeof(double)); + if (chain == NULL) { + 
fprintf(stderr, "Memory allocation failed\n"); + return 1; + } + + metropolis_hastings(samples, chain); + + // Output every 100th sample to avoid too much output and for easier verification + printf("MCMC Samples (every 100th sample):\n"); + for (int i = 0; i < samples; i += 100) { + printf("%f\n", chain[i]); + } + + free(chain); + return 0; +} \ No newline at end of file diff --git a/Machine_Learning_Algorithms/Markov Chain Monte Carlo (MCMC) Algorithm/Readme.md b/Machine_Learning_Algorithms/Markov Chain Monte Carlo (MCMC) Algorithm/Readme.md new file mode 100644 index 00000000..68dee59e --- /dev/null +++ b/Machine_Learning_Algorithms/Markov Chain Monte Carlo (MCMC) Algorithm/Readme.md @@ -0,0 +1,59 @@ +# Markov Chain Monte Carlo (MCMC) Algorithm + + +# Description + +The Markov Chain Monte Carlo (MCMC) algorithm is a probabilistic sampling method used to approximate complex probability distributions. By constructing a Markov Chain that has the desired distribution as its equilibrium distribution, MCMC allows us to draw samples and estimate the distribution efficiently. This method is widely used in Bayesian inference, statistical modeling, and machine learning. + + +# Key Features + + 1.Sampling-Based Approach: Draws samples from a target distribution when direct sampling is difficult. + 2.Markov Chain Process: The algorithm relies on a sequence of dependent samples (a Markov Chain) that gradually converge to the desired distribution. + 3.Flexible and Scalable: MCMC can be applied to high-dimensional distributions. + 4.Useful in Bayesian Inference: Essential for estimating posterior distributions in Bayesian models. + + +# Problem Definition + +Given a probability distribution that is difficult to sample from directly, MCMC methods construct a Markov Chain that samples from this distribution over time. + + Constructing the Markov Chain + +The Markov Chain is initialized with a starting point and iteratively updated by sampling from the distribution. 
Two popular MCMC methods are:

- Metropolis-Hastings: Proposes a candidate sample from a proposal distribution and accepts or rejects it according to an acceptance ratio computed from the target distribution, so the chain converges to the target distribution.

- Gibbs Sampling: A special case of MCMC useful when sampling from the conditional distribution of each variable is easier.

2. Sampling Process

The chain progresses by either accepting or rejecting new sample points based on the acceptance criterion (for Metropolis-Hastings) or by drawing from the conditional distributions (in Gibbs Sampling).

3. Convergence to Target Distribution

As the Markov Chain progresses, the sequence of samples converges to the target distribution, allowing for approximations of distribution properties.

# Time Complexity

Sampling: Varies based on the complexity of the target distribution and the proposal distribution used. Generally, MCMC methods are computationally intensive as they require many iterations to reach convergence.

# Applications

MCMC algorithms are widely used in:

- Bayesian Inference: Estimating posterior distributions for model parameters.

- Statistical Modeling: Approximating complex distributions in high-dimensional spaces.

- Machine Learning: Sampling-based methods for unsupervised learning and probabilistic generative models.

# Conclusion

The Markov Chain Monte Carlo (MCMC) algorithm is a powerful tool for approximating complex distributions that are hard to sample from directly. Although computationally intensive, MCMC enables flexible and scalable solutions in various fields, especially in Bayesian inference. However, careful tuning of parameters and convergence checks are essential to obtain accurate and reliable samples.