diff --git a/.github/ISSUE_TEMPLATE/new_algorithm.md b/.github/ISSUE_TEMPLATE/new_algorithm.md index 0ef64c76..1c504585 100644 --- a/.github/ISSUE_TEMPLATE/new_algorithm.md +++ b/.github/ISSUE_TEMPLATE/new_algorithm.md @@ -2,7 +2,6 @@ name: "Add New Algorithm" about: Propose a new algorithm to be added to the repository title: "[NEW ALGORITHM] " -labels: new algorithm, gssoc-ext, hacktoberfest, level1 assignees: '' --- @@ -21,14 +20,3 @@ assignees: '' ### About: Propose a new algorithm to be added to the repository - ---- - -### Labels: -```new algorithm, gssoc-ext, hacktoberfest, level1``` - ---- - -### Assignees: -- [ ] Contributor in GSSoC-ext -- [ ] Want to work on it diff --git a/.github/ISSUE_TEMPLATE/update_algorithm.md b/.github/ISSUE_TEMPLATE/update_algorithm.md index 77e814e0..e018e416 100644 --- a/.github/ISSUE_TEMPLATE/update_algorithm.md +++ b/.github/ISSUE_TEMPLATE/update_algorithm.md @@ -2,7 +2,6 @@ name: Update Algorithm about: Suggest changes to an existing algorithm title: "[UPDATE ALGORITHM] " -labels: algorithm update, gssoc-ext, hacktoberfest, level1 assignees: '' --- diff --git a/Binary Tree Algorithms/Non-Recursion Traversal Algorithms/README.md b/Binary Tree Algorithms/Non-Recursion Traversal Algorithms/README.md new file mode 100644 index 00000000..f399efce --- /dev/null +++ b/Binary Tree Algorithms/Non-Recursion Traversal Algorithms/README.md @@ -0,0 +1,36 @@ +# Non-Recursive Binary Tree Traversal + +This section describes the implementation of functions in C that performs non-recursive traversal of a binary tree. Here offers pre-order, in-order, and post-order traversals. + +## Problem Statement +Given a binary tree, implement pre-order, in-order, and post-order traversals of the tree in a non-recursive manner. + +## Solution +To perform non-recursive traversal of a binary tree, we utilize a stack data structure. The `StackNode` struct is essential as it allows us to keep track of the nodes during traversal. Each `StackNode` contains a pointer to a `Node` of the binary tree and a pointer to the next `StackNode`, mimicking the Last-In-First-Out (LIFO) behavior of a stack. + +### 1. pre-order Traversal +In pre-order traversal, we visit the root node first, then recursively perform pre-order traversal on the left subtree, and finally on the right subtree. + +**Implementation Details:** +- Initialize an empty stack and push the root node onto the stack. +- Pop a node from the stack, visit it, and push its right child followed by its left child onto the stack. +- Repeat the process until the stack is empty. + +### 2. in-order Traversal +In in-order traversal, we recursively perform in-order traversal on the left subtree, visit the root node, and then recursively perform in-order traversal on the right subtree. + +**Implementation Details:** +- Initialize an empty stack and set the current node to the root. +- Push all left children of the current node onto the stack until a leaf node is reached. +- Pop a node from the stack, visit it, set the current node to its right child, and repeat the process. +- If the current node is NULL and the stack is empty, the traversal is complete. + +### 3. post-order Traversal +In post-order traversal, we recursively perform post-order traversal on the left subtree, then on the right subtree, and finally visit the root node. + +**Implementation Details:** +- Initialize two stacks, `stack1` and `stack2`. +- Push the root node onto `stack1`. 
+- Pop a node from `stack1`, push it onto `stack2`, and then push its left child followed by its right child onto `stack1`. +- Repeat the process until `stack1` is empty. +- Pop nodes from `stack2` and visit them, which will be in post-order since the last nodes to be popped from `stack1` are the leftmost nodes. \ No newline at end of file diff --git a/Binary Tree Algorithms/Non-Recursion Traversal Algorithms/program.c b/Binary Tree Algorithms/Non-Recursion Traversal Algorithms/program.c new file mode 100644 index 00000000..1df42380 --- /dev/null +++ b/Binary Tree Algorithms/Non-Recursion Traversal Algorithms/program.c @@ -0,0 +1,144 @@ +#include +#include + + +typedef struct Node { + int data; + struct Node *left; + struct Node *right; +} Node; + + +typedef struct StackNode { + Node *treeNode; + // Pointer to the next stack node + struct StackNode *next; +} StackNode; + +// Function to create a new node +Node* newNode(int data) { + Node *node = (Node *)malloc(sizeof(Node)); + node->data = data; + node->left = NULL; + node->right = NULL; + return node; +} + +// Function to create a new stack node +StackNode* newStackNode(Node *treeNode) { + StackNode *stackNode = (StackNode *)malloc(sizeof(StackNode)); + stackNode->treeNode = treeNode; + stackNode->next = NULL; + return stackNode; +} + +// Function to check if the stack is empty +int isStackEmpty(StackNode *top) { + return top == NULL; +} + +// Function to push a node onto the stack +void push(StackNode **top, Node *treeNode) { + StackNode *newStackNode = (StackNode *)malloc(sizeof(StackNode)); + newStackNode->treeNode = treeNode; + newStackNode->next = *top; + *top = newStackNode; +} + +// Function to pop a node from the stack +Node* pop(StackNode **top) { + if (isStackEmpty(*top)) { + return NULL; + } + StackNode *temp = *top; + Node *treeNode = temp->treeNode; + *top = temp->next; + free(temp); + return treeNode; +} + +// Function to get the top node of the stack +Node* top(StackNode *top) { + if (isStackEmpty(top)) { + return NULL; + } + return top->treeNode; +} + +// Function to perform preorder traversal of the tree +void preorderTraversal(Node *root) { + if (root == NULL) return; + StackNode *stack = NULL; + // Push the root node onto the stack + push(&stack, root); + while (!isStackEmpty(stack)) { + Node *current = pop(&stack); + // Print the data + printf("%d ", current->data); + if (current->right) push(&stack, current->right); + if (current->left) push(&stack, current->left); + } +} + +// Function to perform inorder traversal of the tree +void inorderTraversal(Node *root) { + if (root == NULL) return; + StackNode *stack = NULL; + Node *current = root; + while (current != NULL || !isStackEmpty(stack)) { + while (current != NULL) { + // Push nodes onto the stack + push(&stack, current); + // Move to the left child + current = current->left; + } + current = pop(&stack); + printf("%d ", current->data); + // Move to the right child + current = current->right; + } +} + +// Function to perform postorder traversal of the tree +void postorderTraversal(Node *root) { + if (root == NULL) return; + // Initialize stack1 + StackNode *stack1 = NULL; + // Initialize stack2 + StackNode *stack2 = NULL; + // Push the root node onto stack1 + push(&stack1, root); + while (!isStackEmpty(stack1)) { + Node *current = pop(&stack1); + // Push the node onto stack2 + push(&stack2, current); + // Push left child if it exists + if (current->left) push(&stack1, current->left); + // Push right child if it exists + if (current->right) push(&stack1, 
current->right); + } + while (!isStackEmpty(stack2)) { + // Pop a node from stack2 + Node *current = pop(&stack2); + printf("%d ", current->data); + } +} + +// Main function to demonstrate tree traversals +int main() { + Node* root = newNode(10); + root->left = newNode(5); + root->right = newNode(20); + root->left->left = newNode(3); + root->left->right = newNode(8); + printf("Preorder traversal: "); + preorderTraversal(root); + printf("\n"); + printf("Inorder traversal: "); + inorderTraversal(root); + printf("\n"); + printf("Postorder traversal: "); + postorderTraversal(root); + printf("\n"); + return 0; +} \ No newline at end of file diff --git a/Bitwise Algorithms/Euclid's Algorithm.c b/Bitwise Algorithms/Euclid's Algorithm.c new file mode 100644 index 00000000..ca6aeb9f --- /dev/null +++ b/Bitwise Algorithms/Euclid's Algorithm.c @@ -0,0 +1,34 @@ +#include + +int BitwiseGCD(int a, int b) +{ + // Base cases + if (b == 0 || a == b) return a; + if (a == 0) return b; + + // If both a and b are even + // divide both a and b by 2. And multiply the result with 2 + if ( (a & 1) == 0 && (b & 1) == 0 ) + return gcd(a>>1, b>>1) << 1; + + // If a is even and b is odd, divide a by 2 + if ( (a & 1) == 0 && (b & 1) != 0 ) + return gcd(a>>1, b); + + // If a is odd and b is even, divide b by 2 + if ( (a & 1) != 0 && (b & 1) == 0 ) + return gcd(a, b>>1); + + // If both are odd, then apply normal subtraction algorithm. + // Note that odd-odd case always converts odd-even case after one recursion + return (a > b)? gcd(a-b, b): gcd(a, b-a); +} + +int main() { + int m, n; + printf("Enter two nonnegative integers: "); + scanf("%d %d", &m, &n); + int gcd = BitwiseGCD(m, n); + printf("Greatest common divisor (GCD) of %d and %d is: %d\n", m, n, gcd); + return 0; +} diff --git a/Bitwise Algorithms/README.md b/Bitwise Algorithms/README.md new file mode 100644 index 00000000..b156a990 --- /dev/null +++ b/Bitwise Algorithms/README.md @@ -0,0 +1,90 @@ +# Euclid’s Algorithm when % and / operations are costly + + + +**Euclid’s algorithm** is used to find **Greatest Common Divisor (GCD)** of two numbers. There are mainly **two** versions of algorithm. +- Using subtraction +- Using modulo operator + + +### Version 1 (Using subtraction) +```plaintext +// Recursive function to return gcd of a and b + +int gcd(int a, int b) +{ + if (a == b) return a; + + return (a > b)? gcd(a-b, b): gcd(a, b-a); +} +``` +**Time Complexity** : O(max(a, b)) +**Space Complexity** : O(1) + + + +## Version 2 (Using modulo operator) + +```plaintext +// Function to return gcd of a and b + +int gcd(int a, int b) +{ + if (a == 0) return b; + + return gcd(b%a, a); +} +``` + +**Time Complexity** : O(log(max(a, b))) +**Space Complexity** : O(1) + + +Version 1 can take linear time to find the GCD. +Consider the situation when one of the given numbers is much bigger than the other: +Version 2 is obviously more efficient as there are less recursive calls and takes logarithmic time. + +**Consider a situation where modulo operator is not allowed, can we optimize version 1 to work faster?** + +Below are some important observations. The idea is to use bitwise operators. +We can find x/2 using x>>1. We can check whether x is odd or even using x&1. +- gcd(a, b) = 2*gcd(a/2, b/2) if both a and b are even. +- gcd(a, b) = gcd(a/2, b) if a is even and b is odd. +- gcd(a, b) = gcd(a, b/2) if a is odd and b is even. 
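+- gcd(a, b) = gcd(a-b, b) if both a and b are odd and a > b (otherwise gcd(a, b-a)); one subtraction turns the odd-odd case into an odd-even case.
+
+For example, gcd(12, 18): both are even, so gcd(12, 18) = 2*gcd(6, 9); 6 is even and 9 is odd, so gcd(6, 9) = gcd(3, 9); both are odd, so gcd(3, 9) = gcd(3, 6); 6 is even, so gcd(3, 6) = gcd(3, 3) = 3, giving gcd(12, 18) = 2*3 = 6.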
+ + +## Bitwise Algorithms: + +### Implementation using Bitwise Operators: + +```plaintext +int gcd(int a, int b) +{ + // Base cases + if (b == 0 || a == b) return a; + if (a == 0) return b; + + // If both a and b are even, divide both a + // and b by 2. And multiply the result with 2 + if ( (a & 1) == 0 && (b & 1) == 0 ) + return gcd(a>>1, b>>1) << 1; + + // If a is even and b is odd, divide a by 2 + if ( (a & 1) == 0 && (b & 1) != 0 ) + return gcd(a>>1, b); + + // If a is odd and b is even, divide b by 2 + if ( (a & 1) != 0 && (b & 1) == 0 ) + return gcd(a, b>>1); + + // If both are odd, then apply normal subtraction + // algorithm. Note that odd-odd case always + // converts odd-even case after one recursion + return (a > b)? gcd(a-b, b): gcd(a, b-a); +} +``` + +**Time Complexity** : O(log(max(a, b))) +**Space Complexity** : O(1) + +The Time and Space Complexity remains same as using the modulo operators. \ No newline at end of file diff --git a/Deadlock avoidance algorithm/Wait and Die Algorithm/program.c b/Deadlock avoidance algorithm/Wait and Die Algorithm/program.c new file mode 100644 index 00000000..6b05bc32 --- /dev/null +++ b/Deadlock avoidance algorithm/Wait and Die Algorithm/program.c @@ -0,0 +1,84 @@ +#include +#include +#include +#include + +#define MAX_TRANSACTIONS 5 // Maximum number of transactions +#define MAX_RESOURCES 3 // Maximum number of resources + +typedef struct { + int id; // Transaction ID + int timestamp; // Transaction's timestamp (used for age comparison) +} Transaction; + +typedef struct { + bool is_locked; // Resource lock status + Transaction *locked_by; // Pointer to the transaction that holds the lock +} Resource; + +Transaction transactions[MAX_TRANSACTIONS]; +Resource resources[MAX_RESOURCES]; + +// Initialize transactions with unique IDs and timestamps +void initialize_transactions() { + srand(time(NULL)); + for (int i = 0; i < MAX_TRANSACTIONS; i++) { + transactions[i].id = i + 1; + transactions[i].timestamp = rand() % 100; // Random timestamp for demonstration + } +} + +// Initialize resources as unlocked +void initialize_resources() { + for (int i = 0; i < MAX_RESOURCES; i++) { + resources[i].is_locked = false; + resources[i].locked_by = NULL; + } +} + +// Function to simulate requesting a resource +void request_resource(int trans_id, int res_id) { + Transaction *transaction = &transactions[trans_id]; + Resource *resource = &resources[res_id]; + + printf("Transaction %d (Timestamp %d) requesting Resource %d\n", + transaction->id, transaction->timestamp, res_id); + + // If resource is not locked, grant it to the transaction + if (!resource->is_locked) { + resource->is_locked = true; + resource->locked_by = transaction; + printf("Resource %d granted to Transaction %d\n", res_id, transaction->id); + } else { + // If resource is locked, apply Wait-Die scheme + Transaction *current_holder = resource->locked_by; + + if (transaction->timestamp < current_holder->timestamp) { + // If transaction is older, it waits + printf("Transaction %d waits for Resource %d held by Transaction %d\n", + transaction->id, res_id, current_holder->id); + } else { + // If transaction is younger, it dies (abort) + printf("Transaction %d aborted (dies) as it is younger than Transaction %d holding Resource %d\n", + transaction->id, current_holder->id, res_id); + } + } +} + +// Function to simulate transactions requesting resources +void simulate_requests() { + request_resource(0, 1); // Transaction 1 requests Resource 2 + request_resource(1, 1); // Transaction 2 requests 
Resource 2 (Wait or Die based on timestamps) + request_resource(2, 0); // Transaction 3 requests Resource 1 + request_resource(3, 1); // Transaction 4 requests Resource 2 (Wait or Die based on timestamps) +} + +int main() { + initialize_transactions(); + initialize_resources(); + + printf("Simulating Wait-Die Deadlock Avoidance...\n"); + simulate_requests(); + + return 0; +} diff --git a/Deadlock avoidance algorithm/Wait and Die Algorithm/readme.md b/Deadlock avoidance algorithm/Wait and Die Algorithm/readme.md new file mode 100644 index 00000000..dce3c66d --- /dev/null +++ b/Deadlock avoidance algorithm/Wait and Die Algorithm/readme.md @@ -0,0 +1,57 @@ +# Wait-Die Deadlock Avoidance Algorithm in C + +This project implements the **Wait-Die Algorithm** in C, a deadlock avoidance technique used primarily in database and transaction management systems. The Wait-Die scheme ensures that deadlocks do not occur by enforcing a priority rule based on transaction age (timestamp), allowing only specific requests while aborting others. + +## Table of Contents +- [Introduction](#introduction) +- [Algorithm Overview](#algorithm-overview) +- [Features](#features) +- [Example](#example) +- [Limitations](#limitations) + +## Introduction + +The Wait-Die Algorithm is a deadlock avoidance algorithm that manages resource allocation based on transaction age. When a transaction requests a resource that is already held by another transaction, the algorithm uses timestamps to determine whether the requesting transaction should wait or be aborted ("die"). This approach prevents deadlocks by avoiding circular waiting conditions. + +## Algorithm Overview + +The Wait-Die Algorithm works as follows: +- **Age Comparison**: Each transaction is assigned a timestamp when it begins, representing its age. +- **Wait Rule**: If a younger transaction requests a resource held by an older transaction, it waits. +- **Die Rule**: If an older transaction requests a resource held by a younger transaction, the older transaction is aborted (dies) and can retry later. + +This scheme avoids deadlock by preventing a cycle in the wait-for graph, thus maintaining system safety. + +## Features + +- **Deadlock Avoidance**: Prevents deadlock by enforcing a strict priority rule based on transaction age. +- **Simple Priority System**: Uses timestamps to decide which transactions wait and which are aborted. +- **Efficient for Database Systems**: Commonly used in databases and transactional systems to manage locks on resources. + +## Example + +### Sample Scenario + +Assume we have three transactions with timestamps and two resources: + +| Transaction | ID | Timestamp | +|-------------|----|-----------| +| T1 | 1 | 10 | +| T2 | 2 | 15 | +| T3 | 3 | 5 | + +#### Simulation + +1. **Transaction T1 requests Resource R1**: Granted. +2. **Transaction T2 requests Resource R1**: Transaction T2 is younger than T1, so it waits. +3. **Transaction T3 requests Resource R1**: Transaction T3 is older than T1, so T1 is aborted (dies) and releases R1 for T3. + +This approach ensures the system remains deadlock-free by allowing older transactions to preempt younger ones if necessary. + +## Limitations + +- **Transaction-Based**: Suited for transactional systems; not typically used for general-purpose resource allocation. +- **Fixed Priority**: Transaction age remains fixed, so newer transactions may experience more aborts. 
+- **Non-Preemptive Resources**: The algorithm assumes resources are non-preemptive, meaning a held resource cannot be forcibly taken from a transaction. + +This implementation demonstrates a simplified version of the Wait-Die algorithm for educational purposes. For a real-world application, further refinements would be required to handle complex resource and transaction management scenarios. diff --git a/Dynamic Programming/Dice Roll Problem/README.md b/Dynamic Programming/Dice Roll Problem/README.md new file mode 100644 index 00000000..9bb9474f --- /dev/null +++ b/Dynamic Programming/Dice Roll Problem/README.md @@ -0,0 +1,34 @@ +# Dice Roll Sum Problem + +## Description + +The **Dice Roll Sum** problem is a dynamic programming challenge that asks how many ways you can achieve a given target sum by rolling a certain number of dice. Each die has a fixed number of faces (e.g., 6), and each face has an equal probability of appearing. The objective is to calculate the number of distinct ways to reach the target sum using the given dice. + +### Problem Statement +Given: +- `n`: the number of dice, +- `target`: the target sum, +- `faces`: the number of faces on each die (default is 6). + +Find the number of ways to achieve exactly the target sum by rolling the dice. + +### Example + +**Input:** +- Number of dice: `2` +- Target sum: `7` + +**Output:** +- Number of ways to reach target sum: `6` + +**Explanation:** +With 2 dice, there are six ways to achieve a sum of 7: +- (1,6), (2,5), (3,4), (4,3), (5,2), (6,1) + +## Solution Approach + +We use **dynamic programming** to build up the solution by breaking down the problem into smaller subproblems: +1. Define a 2D DP array `dp[i][j]` where `i` is the number of dice and `j` is the target sum. +2. Initialize `dp[0][0] = 1`, representing one way to achieve a sum of 0 with 0 dice. +3. For each dice count `i`, calculate the possible ways to reach each target sum `j` by considering all face values from 1 up to `faces`. +4. The answer will be stored in `dp[n][target]`, representing the number of ways to reach the target sum with `n` dice. 
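+
+In step 3, each entry is computed with the recurrence `dp[i][j] = dp[i-1][j-1] + dp[i-1][j-2] + ... + dp[i-1][j-faces]`, skipping any term where `j - face` would go negative; intuitively, the last die shows some face value `face`, and the remaining `i-1` dice must sum to `j - face`.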
\ No newline at end of file diff --git a/Dynamic Programming/Dice Roll Problem/program.c b/Dynamic Programming/Dice Roll Problem/program.c new file mode 100644 index 00000000..359c33cf --- /dev/null +++ b/Dynamic Programming/Dice Roll Problem/program.c @@ -0,0 +1,41 @@ +#include +#include + +#define MAX_DICE 100 +#define MAX_SUM 1000 +#define FACES 6 // Number of faces on each die, can be adjusted as needed + +// DP table to store the number of ways to get each sum with a given number of dice +int dp[MAX_DICE + 1][MAX_SUM + 1]; + +int diceRollSum(int n, int target) { + // Initialize the DP table with zeros + memset(dp, 0, sizeof(dp)); + dp[0][0] = 1; // Base case: 1 way to get sum 0 with 0 dice + + for (int i = 1; i <= n; i++) { // For each dice count + for (int j = 1; j <= target; j++) { // For each possible sum + dp[i][j] = 0; + for (int face = 1; face <= FACES; face++) { // For each face value + if (j - face >= 0) { // Ensure sum is non-negative + dp[i][j] += dp[i - 1][j - face]; + } + } + } + } + + return dp[n][target]; +} + +int main() { + int n, target; + printf("Enter number of dice: "); + scanf("%d", &n); + printf("Enter target sum: "); + scanf("%d", &target); + + int result = diceRollSum(n, target); + printf("Number of ways to reach the target sum %d with %d dice: %d\n", target, n, result); + + return 0; +} diff --git a/Dynamic Programming/FROM JUMP/README.md b/Dynamic Programming/FROM JUMP/README.md new file mode 100644 index 00000000..40113f09 --- /dev/null +++ b/Dynamic Programming/FROM JUMP/README.md @@ -0,0 +1,25 @@ +Minimum Energy to Reach the Last Stair +Problem Description +This program calculates the minimum energy required to reach the last stair in a series of stairs, given the heights of each stair. The energy required to move from one stair to another is calculated based on the height difference between the stairs. At each stair, you can either take a step to the next stair or skip one stair to move two steps ahead, each option consuming different amounts of energy. + +Approach +To solve this problem, a dynamic programming (DP) approach is used. The key idea is to minimize the energy required at each stair based on the energy consumption of the previous steps. + +Dynamic Programming Array: + +We use an array dp[], where dp[i] represents the minimum energy required to reach stair i. +Initialize dp[0] = 0, since no energy is required to start at the first stair. +Energy Calculation: + +For each stair i (starting from the second stair), calculate the minimum energy to reach that stair from either: +One step back: dp[i-1] + abs(height[i] - height[i-1]) +Two steps back: dp[i-2] + abs(height[i] - height[i-2]) (only if i > 1). +Choose the minimum of these two values to find the optimal (least energy-consuming) path to stair i. +Result: + +The last element in the dp array (dp[n-1]) gives the minimum energy required to reach the last stair. +Complexity +Time Complexity: O(n) since we calculate the minimum energy required for each stair once. +Space Complexity: O(n) for the dp array used to store the minimum energy at each stair. +Example +Given height = [10, 20, 30, 10], the function calculates that the minimum energy required to reach the last stair is 20. \ No newline at end of file diff --git a/Dynamic Programming/FROM JUMP/program.c b/Dynamic Programming/FROM JUMP/program.c new file mode 100644 index 00000000..063af0f7 --- /dev/null +++ b/Dynamic Programming/FROM JUMP/program.c @@ -0,0 +1,31 @@ +#include +#include + +int min(int a, int b) { + return (a < b) ? 
a : b; +} + +int minEnergy(int n, int height[]) { + int dp[n]; // Array to store the minimum energy for each stair + dp[0] = 0; // Starting point, so no energy required to be at the first stair + + // Fill the dp array from stair 1 to stair n-1 + for (int i = 1; i < n; i++) { + int oneStep = dp[i - 1] + abs(height[i] - height[i - 1]); + int twoStep = (i > 1) ? dp[i - 2] + abs(height[i] - height[i - 2]) : oneStep; + + dp[i] = min(oneStep, twoStep); // Take the minimum of the two options + } + + return dp[n - 1]; // Minimum energy required to reach the last stair +} + +int main() { + int n = 4; + int height[] = {10, 20, 30, 10}; + + int result = minEnergy(n, height); + printf("Minimum energy required: %d\n", result); + + return 0; +} diff --git a/Dynamic Programming/Scramble String/README.md b/Dynamic Programming/Scramble String/README.md new file mode 100644 index 00000000..f9e14991 --- /dev/null +++ b/Dynamic Programming/Scramble String/README.md @@ -0,0 +1,35 @@ +# Scramble String Problem + +## Description + +The Scramble String problem is to determine whether one string is a scrambled version of another. Given two strings, `s1` and `s2`, we say that `s2` is a scrambled version of `s1` if it can be formed by recursively dividing `s1` into two non-empty substrings and swapping them. + +For example: +- Input: `s1 = "great"`, `s2 = "rgeat"` +- Output: `true` (because "rgeat" is a scrambled version of "great") + +## Problem Requirements + +We need to: +1. Check if two strings contain the same characters. +2. Recursively verify if substrings can be swapped to form the scrambled string. +3. Optimize using memoization to avoid redundant calculations. + +## Solution Approach + +This solution uses **Dynamic Programming** with **Recursion and Memoization**: + +1. **Recursive Check**: + - For each possible split of `s1`, we recursively check: + - If dividing and not swapping substrings forms `s2`, or + - If dividing and swapping substrings forms `s2`. + +2. **Memoization Table**: + - We use a 3D table `memo[i1][i2][len]` to store results of subproblems where: + - `i1` and `i2` are starting indices in `s1` and `s2`. + - `len` is the length of the substrings. + - Each entry in the table can be either `-1` (not calculated), `1` (scramble), or `0` (not a scramble). + +3. **Complexity**: + - **Time Complexity**: `O(N^4)`, where `N` is the length of the string. + - **Space Complexity**: `O(N^3)` due to the memoization table. 
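+
+For the sample pair, split `s1 = "great"` into `"gr"` and `"eat"`: the right part `"eat"` already matches the tail of `s2 = "rgeat"`, and `"gr"` matches the front `"rg"` after swapping its two one-character halves, so the recursion reports a scramble using only the non-swapped branch at the top level.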
diff --git a/Dynamic Programming/Scramble String/program.c b/Dynamic Programming/Scramble String/program.c new file mode 100644 index 00000000..3cc87671 --- /dev/null +++ b/Dynamic Programming/Scramble String/program.c @@ -0,0 +1,60 @@ +#include +#include +#include + +#define MAX_LEN 100 + +// Memoization table to store results +int memo[MAX_LEN][MAX_LEN][MAX_LEN]; + +// Helper function to check if two strings have the same characters +bool haveSameCharacters(const char *s1, const char *s2, int len) { + int count[26] = {0}; + for (int i = 0; i < len; i++) { + count[s1[i] - 'a']++; + count[s2[i] - 'a']--; + } + for (int i = 0; i < 26; i++) { + if (count[i] != 0) return false; + } + return true; +} + +// Recursive function with memoization to check if s2 is a scrambled version of s1 +bool isScramble(const char *s1, const char *s2, int i1, int i2, int len) { + if (memo[i1][i2][len] != -1) return memo[i1][i2][len]; + + if (strncmp(s1 + i1, s2 + i2, len) == 0) return memo[i1][i2][len] = 1; + + if (!haveSameCharacters(s1 + i1, s2 + i2, len)) return memo[i1][i2][len] = 0; + + for (int i = 1; i < len; i++) { + if ((isScramble(s1, s2, i1, i2, i) && isScramble(s1, s2, i1 + i, i2 + i, len - i)) || + (isScramble(s1, s2, i1, i2 + len - i, i) && isScramble(s1, s2, i1 + i, i2, len - i))) { + return memo[i1][i2][len] = 1; + } + } + + return memo[i1][i2][len] = 0; +} + +bool isScrambleWrapper(const char *s1, const char *s2) { + int len = strlen(s1); + if (len != strlen(s2)) return false; + + memset(memo, -1, sizeof(memo)); + return isScramble(s1, s2, 0, 0, len); +} + +int main() { + const char *s1 = "great"; + const char *s2 = "rgeat"; + + if (isScrambleWrapper(s1, s2)) { + printf("'%s' is a scrambled version of '%s'\n", s2, s1); + } else { + printf("'%s' is NOT a scrambled version of '%s'\n", s2, s1); + } + + return 0; +} diff --git a/Graph Algorithms/Cycle detection using dfs/Readme.md b/Graph Algorithms/Cycle detection using dfs/Readme.md new file mode 100644 index 00000000..82a97089 --- /dev/null +++ b/Graph Algorithms/Cycle detection using dfs/Readme.md @@ -0,0 +1,49 @@ +# Longest Increasing Subsequence in C + +This program calculates the **Longest Increasing Subsequence (LIS)** in an array of integers, using dynamic programming. It allows users to input the array size and its elements, then outputs the length of the longest increasing subsequence. + +## How It Works + +The program uses a dynamic programming approach where: +- An auxiliary array, `lis`, is maintained to store the LIS length at each index. +- For each element, it calculates the maximum LIS that ends at that position by considering all previous elements. +- Finally, it finds the maximum value in the `lis` array to determine the length of the longest increasing subsequence. + +### Time Complexity +The time complexity of this approach is **O(n²)** due to the nested loops. + +### Space Complexity +The space complexity is **O(n)**, as it uses an auxiliary array `lis` of the same length as the input array. + +## Getting Started + +### Prerequisites + +- A C compiler (like GCC). + +### Running the Program + +1. **Clone the Repository** (optional if using version control): + ```bash + git clone https://github.com/your-username/longest-increasing-subsequence + cd longest-increasing-subsequence +2. **Compile the Code **: + +```bash + Copy code + gcc lis.c -o lis + +3. 
**Run the Program**: + +```bash + Copy code + ./lis + +**Example Usage** +```bash +Enter the number of elements in the array: 9 +Enter the elements of the array: +10 22 9 33 21 50 41 60 80 +Length of LIS is 6 + +In this example, the longest increasing subsequence is [10, 22, 33, 50, 60, 80] with a length of 6. \ No newline at end of file diff --git a/Graph Algorithms/Cycle detection using dfs/code.c b/Graph Algorithms/Cycle detection using dfs/code.c new file mode 100644 index 00000000..1c9cd245 --- /dev/null +++ b/Graph Algorithms/Cycle detection using dfs/code.c @@ -0,0 +1,49 @@ +#include + +int longestIncreasingSubsequence(int arr[], int n) +{ + int lis[n]; + for (int i = 0; i < n; i++) + lis[i] = 1; // Initialize LIS values for all indexes as 1 + + for (int i = 1; i < n; i++) + { + for (int j = 0; j < i; j++) + { + if (arr[i] > arr[j] && lis[i] < lis[j] + 1) + { + lis[i] = lis[j] + 1; + } + } + } + + int max = 0; + for (int i = 0; i < n; i++) + { + if (max < lis[i]) + max = lis[i]; + } + return max; +} + +int main() +{ + int n; + + // Take the number of elements as input + printf("Enter the number of elements in the array: "); + scanf("%d", &n); + + int arr[n]; + + // Take array elements as input + printf("Enter the elements of the array:\n"); + for (int i = 0; i < n; i++) + { + scanf("%d", &arr[i]); + } + + // Calculate and print the length of LIS + printf("Length of LIS is %d\n", longestIncreasingSubsequence(arr, n)); + return 0; +} diff --git a/Graph Algorithms/Graph_Reversal/README.md b/Graph Algorithms/Graph_Reversal/README.md new file mode 100644 index 00000000..984265a2 --- /dev/null +++ b/Graph Algorithms/Graph_Reversal/README.md @@ -0,0 +1,74 @@ +# Reverse a Directed Graph + +### Problem Statement: +Suppose we have a directed graph, we have to find its reverse so if an edge goes from i to j, it now goes from j to i. Here input will be an adjacency list, and if there are n nodes, the nodes will be (0, 1, ..., n-1). + +### Approach: +The approach to transposing a directed graph involves creating a new structure to hold reversed edges. For each vertex, the algorithm iterates through its outgoing edges and appends the vertex to the list of the target vertex in the new structure. This efficiently constructs the transposed graph in linear time relative to the number of vertices and edges. + +### Algorithm Steps: +1. **Define Structures**: + - Define an `AdjList` structure, a dynamic array to hold adjacent nodes for each node. + - Define a `Graph` structure containing an array of `AdjList`s to represent the adjacency lists for all nodes. + +2. **Initialize the Graph**: + - Create a function `initGraph` that initializes a `Graph` with a given number of nodes. + - For each node, initialize an empty `AdjList` with a small initial capacity. + +3. **Add an Edge**: + - Create a function `pushBack` for adding an edge to a node's `AdjList`. + - If the current capacity of the `AdjList` is full, double the capacity and reallocate memory. + +4. **Transpose the Graph**: + - Create a function `solve` to transpose the graph: + - Initialize a new `Graph` (the transposed graph) with the same number of nodes as the original. + - For each node in the original graph: + - For each node it points to, add the original node as an adjacent node in the transposed graph. + - Return the transposed graph. + +5. **Print the Graph**: + - Define a function `printGraph` to display the adjacency lists of the graph: + - For each node, print the list of nodes it points to. + +6. 
**Example Usage**: + - Initialize a graph with a specified number of nodes. + - Populate the graph by adding edges. + - Call the `solve` function to transpose the graph. + - Print the adjacency lists of the transposed graph. + +### Time Complexity: +- The time complexity of the program is `O(V + E)`, where `V` is the number of vertices and `E` is the number of edges, as it processes each vertex and edge once. + +### Sample Input: + +``` + {{1, 2}, {4}, {4}, {1, 2}, {3}} +``` +### Sample Output: + +``` + [[], [0, 3], [0, 3], [4], [1, 2]] +``` + +### Explanation of Sample: + +- The input `graph = [[1,2],[4],[4],[1,2],[3]]` represents a directed graph as an adjacency list where: + + - Node 0 points to nodes 1 and 2. + - Node 1 points to node 4. + - Node 2 points to node 4. + - Node 3 points to nodes 1 and 2. + - Node 4 points to node 3. + +- The reversed graph representation is `ans = [[], [0, 3], [0, 3], [4], [1, 2]]`, which means: + + - Node 0 has no outgoing edges. + - Nodes 1 and 2 have outgoing edges to nodes 0 and 3. + - Node 3 has an outgoing edge to node 4. + - Node 4 has outgoing edges to nodes 1 and 2. + +### Diagrammatic Representation of the input: +![directed_graph](https://github.com/user-attachments/assets/b3a8fc46-e508-45a0-bf97-458c2054f085) + +### Diagrammatic Representation of the output: +![directed_graph1](https://github.com/user-attachments/assets/7cb03236-3f1a-49cc-b2ce-6ef73aac7fa4) diff --git a/Graph Algorithms/Graph_Reversal/program.c b/Graph Algorithms/Graph_Reversal/program.c new file mode 100644 index 00000000..89afac37 --- /dev/null +++ b/Graph Algorithms/Graph_Reversal/program.c @@ -0,0 +1,81 @@ +#include +#include + +typedef struct { + int* data; // Dynamic array to store elements + int size; // Current size of the array + int capacity; // Current allocated capacity +} AdjList; + +typedef struct { + AdjList* arr; // Array of AdjLists representing the graph + int size; // Number of nodes in the graph +} Graph; + +// Initialize an AdjList with a given capacity +void initAdjList(AdjList* al, int initialCapacity) { + al->data = (int*)malloc(initialCapacity * sizeof(int)); + al->size = 0; + al->capacity = initialCapacity; +} + +// Adds a value to the AdjList, resizing only if necessary +void pushBack(AdjList* al, int value) { + if (al->size == al->capacity) { + al->capacity = al->capacity == 0 ? 
2 : al->capacity * 2; + al->data = (int*)realloc(al->data, al->capacity * sizeof(int)); + } + al->data[al->size++] = value; +} + +// Initialize a graph with a given size (number of nodes) +void initGraph(Graph* g, int size) { + g->arr = (AdjList*)malloc(size * sizeof(AdjList)); + g->size = size; + for (int i = 0; i < size; ++i) { + initAdjList(&g->arr[i], 2); // Small initial capacity to reduce reallocations + } +} + +// Transpose the graph by reversing edges +Graph solve(Graph* graph) { + Graph ans; + initGraph(&ans, graph->size); + + for (int i = 0; i < graph->size; ++i) { + for (int j = 0; j < graph->arr[i].size; ++j) { + int x = graph->arr[i].data[j]; + pushBack(&ans.arr[x], i); // Add node 'i' to the list of node 'x' in the transposed graph + } + } + return ans; +} + +// Print the adjacency list of the graph +void printGraph(Graph* g) { + for (int i = 0; i < g->size; ++i) { + printf("[ "); + for (int j = 0; j < g->arr[i].size; ++j) { + printf("%d ", g->arr[i].data[j]); + } + printf("]\n"); + } +} + +int main() { + Graph graph; + initGraph(&graph, 5); + + // Building the adjacency list for the input graph + pushBack(&graph.arr[0], 1); + pushBack(&graph.arr[0], 2); + pushBack(&graph.arr[1], 4); + pushBack(&graph.arr[2], 4); + pushBack(&graph.arr[3], 1); + pushBack(&graph.arr[3], 2); + pushBack(&graph.arr[4], 3); + + Graph result = solve(&graph); + printGraph(&result); + return 0; +} diff --git a/Linked_list/Linkedlist_to_BST/program.c b/Linked_list/Linkedlist_to_BST/program.c new file mode 100644 index 00000000..273fa455 --- /dev/null +++ b/Linked_list/Linkedlist_to_BST/program.c @@ -0,0 +1,115 @@ +#include +#include + +// Define the structure for the linked list node +struct ListNode { + int val; + struct ListNode *next; +}; + +// Define the structure for the tree node (BST) +struct TreeNode { + int val; + struct TreeNode *left; + struct TreeNode *right; +}; + +// Function to create a new list node +struct ListNode* createListNode(int val) { + struct ListNode* newNode = (struct ListNode*)malloc(sizeof(struct ListNode)); + newNode->val = val; + newNode->next = NULL; + return newNode; +} + +// Function to create a new tree node +struct TreeNode* createTreeNode(int val) { + struct TreeNode* newNode = (struct TreeNode*)malloc(sizeof(struct TreeNode)); + newNode->val = val; + newNode->left = NULL; + newNode->right = NULL; + return newNode; +} + +// Function to find the size of the linked list +int getListSize(struct ListNode* head) { + int size = 0; + while (head) { + size++; + head = head->next; + } + return size; +} + +// Function to convert the linked list to a balanced BST +struct TreeNode* sortedListToBSTHelper(struct ListNode** headRef, int size) { + if (size <= 0) { + return NULL; + } + + // Recursively build the left subtree + struct TreeNode* left = sortedListToBSTHelper(headRef, size / 2); + + // The middle node will be the root of the current subtree + struct TreeNode* root = createTreeNode((*headRef)->val); + root->left = left; + + // Move the head pointer to the next node in the list + *headRef = (*headRef)->next; + + // Recursively build the right subtree + root->right = sortedListToBSTHelper(headRef, size - size / 2 - 1); + + return root; +} + +// Function to convert the sorted linked list to a balanced BST +struct TreeNode* sortedListToBST(struct ListNode* head) { + int size = getListSize(head); + return sortedListToBSTHelper(&head, size); +} + +// In-order traversal of the binary search tree (for testing) +void inorderTraversal(struct TreeNode* root) { + if (root) { + 
inorderTraversal(root->left); + printf("%d ", root->val); + inorderTraversal(root->right); + } +} + +// Helper function to create a sorted linked list +struct ListNode* createLinkedList(int arr[], int size) { + struct ListNode* head = NULL; + struct ListNode* temp = NULL; + for (int i = 0; i < size; i++) { + struct ListNode* newNode = createListNode(arr[i]); + if (head == NULL) { + head = newNode; + } else { + temp->next = newNode; + } + temp = newNode; + } + return head; +} + +int main() { + // Example sorted linked list: [-10, -3, 0, 5, 9] + int arr[] = {-10, -3, 0, 5, 9}; + int size = sizeof(arr) / sizeof(arr[0]); + + // Create the linked list from the array + struct ListNode* head = createLinkedList(arr, size); + + // Convert the sorted linked list to a balanced BST + struct TreeNode* root = sortedListToBST(head); + + // Print the in-order traversal of the tree + printf("In-order traversal of the balanced BST: "); + inorderTraversal(root); + printf("\n"); + + return 0; +} + diff --git a/Linked_list/Linkedlist_to_BST/readme.md b/Linked_list/Linkedlist_to_BST/readme.md new file mode 100644 index 00000000..c2e6b1e8 --- /dev/null +++ b/Linked_list/Linkedlist_to_BST/readme.md @@ -0,0 +1,42 @@ +# Conversion of Linked List to Balanced Binary Tree + +## Overview +To convert a sorted linked list into a balanced binary search tree (BST), we need to carefully choose the middle element of the linked list to maintain the balance of the tree. A balanced BST ensures that the left and right subtrees of every node have a minimal height difference, which optimizes search, insert, and delete operations. + +## Steps: +1. Calculate the size of the linked list: This helps in determining the middle element for the root. +2. Recursively build the BST by finding the middle element of the list and using it as the root. +3. Move the linked list pointer while constructing the tree to ensure we are processing the nodes in sequence. + +## Explanation: +1. ListNode and TreeNode Structures: + +- ListNode represents a node in the linked list with an integer value and a pointer to the next node. +- TreeNode represents a node in the binary search tree (BST), which contains a value, and pointers to its left and right children. +2. Helper Functions: + +- createListNode: Creates a new linked list node. +- createTreeNode: Creates a new tree node. +- getListSize: Finds the size of the linked list. +- sortedListToBSTHelper: Recursively builds the balanced BST. It takes the current head of the list (passed as a reference) and the size of the current list segment. +- sortedListToBST: Initializes the process of converting the linked list into a BST by calling the helper function with the list's size. +3. In-order Traversal: + +- The inorderTraversal function prints the tree nodes in sorted order (since it’s a binary search tree), which can help verify the correctness of the conversion. +- Helper Function to Create Linked List: + +- The createLinkedList function converts an array into a sorted linked list. This is helpful for testing the solution. +4. Main Function: + +- We create a sorted linked list [-10, -3, 0, 5, 9]. +- We convert this linked list into a balanced BST using the sortedListToBST function. +- The result is printed using in-order traversal, which should print the sorted elements of the BST in the same order. + +### Time Complexity: +- Finding the size of the linked list: **O(n)**, where n is the number of nodes in the list. 
+- Recursive BST construction: O(n), since each node of the linked list is processed exactly once. +Thus, the overall time complexity is O(n), where n is the number of nodes in the linked list. + +### Space Complexity: +- Recursive stack space: **O(log n)**, +- where n is the number of nodes in the linked list (since we are constructing a balanced tree). diff --git a/Linked_list/Merge_N_Ascending_Linked_Lists/README.md b/Linked_list/Merge_N_Ascending_Linked_Lists/README.md new file mode 100644 index 00000000..7bd335a5 --- /dev/null +++ b/Linked_list/Merge_N_Ascending_Linked_Lists/README.md @@ -0,0 +1,31 @@ +This section describes the implementation of a function in C that verifies how to merge n ascending linked lists. +## Problem Statement ## +Gives you an array of linked lists, each of which is already in ascending order. + +Please merge all linked lists into an ascending linked list and return the merged linked list. + +## Solution ## +Since the linked lists are already sorted, we can use a straightforward approach with a time complexity of O(nk log k), where n is the number of linked lists and k is the average number of nodes in each list. + +In the code, we defines a `ListNode` structure for the linked list, a `newNode` function to create new nodes, and the `mergeKLists` function to merge the lists. The `printList` function is used to print the merged list for verification. + +Here's the process: + +1. **Initialize a Dummy Head**: Create a dummy head node that will serve as the starting point of our merged list. This helps in simplifying the code when dealing with the head of the merged list. + +2. **Iterate with a Tail Pointer**: Use a tail pointer to build the merged list. This pointer will always point to the last node in the merged list. + +3. **Find the Minimum Node**: In each iteration, we traverse all the linked lists to find the node with the smallest value, keeping track of the smallest node and its index. + +4. **Link the Smallest Node**: Once Finding the smallest node, we link it to the tail of the merged list and move the tail pointer to this new node. After linking the smallest node, we can move to the next node in the list from which the smallest node was taken. Continue this process until we have traversed all lists and there are no more nodes to process. + +5. **Return the Merged List**: After the loop, we return the next of the dummy head node, which is the head of the merged list. + + + + + + + + + diff --git a/Linked_list/Merge_N_Ascending_Linked_Lists/program.c b/Linked_list/Merge_N_Ascending_Linked_Lists/program.c new file mode 100644 index 00000000..640ec024 --- /dev/null +++ b/Linked_list/Merge_N_Ascending_Linked_Lists/program.c @@ -0,0 +1,93 @@ +#include +#include + +// Define the structure for a singly linked list node. +struct ListNode { + int val; + struct ListNode *next; +}; + +// Function to create a new node with given value. +struct ListNode* newNode(int val) { + struct ListNode* node = (struct ListNode*)malloc(sizeof(struct ListNode)); + node->val = val; + node->next = NULL; + return node; +} + +// Function to merge k sorted linked lists into one sorted linked list. +struct ListNode* mergeKLists(struct ListNode** lists, int k) { + int size = k; + // Dummy head to simplify edge cases. + struct ListNode* dummyHead = newNode(0); + // Tail pointer to build the merged list. + struct ListNode* tail = dummyHead; + + while (1) { + // Node with the smallest value. + struct ListNode* minNode = NULL; + // Index of the list containing the minNode. 
+ int minPointer = -1; + + for (int i = 0; i < size; i++) { + if (lists[i] == NULL) { + continue; + } + if (minNode == NULL || lists[i]->val < minNode->val) { + minNode = lists[i]; + minPointer = i; + } + } + + if (minPointer == -1) { + break; + } + // Link the smallest node to the merged list and move the tail pointer. + tail->next = minNode; + tail = tail->next; + // Move to the next node in the list from which minNode was taken. + lists[minPointer] = lists[minPointer]->next; + } + + // The merged list starts after the dummy head. + struct ListNode* head = dummyHead->next; + // Free the dummy head as it's not part of the actual list. + free(dummyHead); + return head; +} + +// Function to print the linked list starting from the given head node. +void printList(struct ListNode* head) { + while (head) { + printf("%d", head->val); + if (head->next) { + printf("->"); + } + head = head->next; + } + printf("\n"); +} + +// Main function to test the mergeKLists function. +int main() { + // Create three example linked lists. + struct ListNode* l1 = newNode(-1); + l1->next = newNode(4); + l1->next->next = newNode(5); + + struct ListNode* l2 = newNode(0); + l2->next = newNode(2); + l2->next->next = newNode(4996); + + struct ListNode* l3 = newNode(-10); + l3->next = newNode(8); + + struct ListNode* lists[] = {l1, l2, l3}; + int k = sizeof(lists) / sizeof(lists[0]); + + struct ListNode* mergedList = mergeKLists(lists, k); + + printList(mergedList); + + return 0; +} \ No newline at end of file diff --git a/Linked_list/unroll_linkedList/program.c b/Linked_list/unroll_linkedList/program.c new file mode 100644 index 00000000..e69de29b diff --git a/Linked_list/unroll_linkedList/readme.md b/Linked_list/unroll_linkedList/readme.md new file mode 100644 index 00000000..2a665720 --- /dev/null +++ b/Linked_list/unroll_linkedList/readme.md @@ -0,0 +1,27 @@ +# **UNROLLED LINKED LIST** +An **Unrolled Linked List** is a variation of a linked list where each node contains an array of elements rather than a single element. This setup reduces the number of nodes and can improve cache performance, especially for applications with high memory allocation or traversal requirements. + +Here’s how an **Unrolled Linked List** is typically structured: +- Each node has an array of elements. +- Each node maintains the count of elements it currently holds. +- When an array (node) is full, it splits into two nodes, maintaining balance. + +This structure is especially useful in memory-intensive applications or those that frequently iterate over list elements, like graphics rendering. + +### Explanation of Code: + +1. **Node Structure (`UnrolledNode`)**: Each node has a fixed-size array (`elements`) to hold multiple elements and a `count` to keep track of the number of elements in that node. + +2. **Insertion Logic**: + - Traverse to the last node in the list with available space. + - If the node is full, create a new node and split the current node's elements between the old and new nodes. + - Insert the new element in the appropriate node based on its value relative to the split elements. + +3. **Printing**: + - Each node's elements are printed to verify the structure and content of the list. + +### Notes: +- **NODE_CAPACITY**: This is set to 4 for simplicity, but in practice, it can be larger (often 16 or 32). +- **Performance**: Unrolled linked lists have better cache performance and lower memory overhead than standard linked lists due to fewer nodes. 
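+
+The snippet below is a minimal, illustrative sketch of the structure described above (assuming `NODE_CAPACITY` of 4 and a simple append-with-split insert; the ordered-insert variant from the notes would additionally compare the new value against the split point). Names and details are placeholders rather than a finished implementation:
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+
+#define NODE_CAPACITY 4   /* kept small for illustration; often 16 or 32 in practice */
+
+typedef struct UnrolledNode {
+    int elements[NODE_CAPACITY];   /* fixed-size array of elements */
+    int count;                     /* number of slots currently used */
+    struct UnrolledNode *next;
+} UnrolledNode;
+
+static UnrolledNode *newUnrolledNode(void) {
+    UnrolledNode *node = malloc(sizeof(UnrolledNode));
+    node->count = 0;
+    node->next = NULL;
+    return node;
+}
+
+/* Append a value; when the tail node is full, move its upper half
+   into a fresh node so both nodes stay roughly half full. */
+static void insertValue(UnrolledNode **head, int value) {
+    if (*head == NULL) *head = newUnrolledNode();
+    UnrolledNode *node = *head;
+    while (node->next != NULL) node = node->next;     /* walk to the tail */
+
+    if (node->count == NODE_CAPACITY) {               /* split a full node */
+        UnrolledNode *split = newUnrolledNode();
+        int half = NODE_CAPACITY / 2;
+        for (int i = half; i < NODE_CAPACITY; i++)
+            split->elements[split->count++] = node->elements[i];
+        node->count = half;
+        node->next = split;
+        node = split;
+    }
+    node->elements[node->count++] = value;
+}
+
+static void printList(const UnrolledNode *node) {
+    for (; node != NULL; node = node->next) {
+        printf("[ ");
+        for (int i = 0; i < node->count; i++) printf("%d ", node->elements[i]);
+        printf("] ");
+    }
+    printf("\n");
+}
+
+int main(void) {
+    UnrolledNode *head = NULL;
+    for (int v = 1; v <= 10; v++) insertValue(&head, v);
+    printList(head);   /* [ 1 2 ] [ 3 4 ] [ 5 6 ] [ 7 8 9 10 ] */
+    return 0;
+}
+```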
+ +Unrolled linked lists are helpful in applications that benefit from reduced node traversal times, like text editors or data structures that require frequent insertions, deletions, and searches. diff --git a/Miscellaneous Algorithms/Adaptive Disk Reordering Algorithm/ADR.c b/Miscellaneous Algorithms/Adaptive Disk Reordering Algorithm/ADR.c new file mode 100644 index 00000000..eff29869 --- /dev/null +++ b/Miscellaneous Algorithms/Adaptive Disk Reordering Algorithm/ADR.c @@ -0,0 +1,79 @@ +#include +#include + +#define MAX_REQUESTS 100 + +// Structure to store each disk request with track number and status +typedef struct { + int track; + int is_serviced; +} DiskRequest; + +// Function to calculate absolute difference +int abs_diff(int a, int b) { + return a > b ? a - b : b - a; +} + +// Function to reorder requests dynamically based on current disk position +void reorder_requests(DiskRequest requests[], int num_requests, int current_head) { + for (int i = 0; i < num_requests - 1; i++) { + for (int j = i + 1; j < num_requests; j++) { + // Sort requests based on proximity to current head position + if (abs_diff(requests[i].track, current_head) > abs_diff(requests[j].track, current_head)) { + DiskRequest temp = requests[i]; + requests[i] = requests[j]; + requests[j] = temp; + } + } + } +} + +// Adaptive Disk Reordering (ADR) algorithm implementation +void adaptive_disk_reordering(DiskRequest requests[], int num_requests, int initial_head) { + int current_head = initial_head; + int total_seek_time = 0; + + printf("Seek Sequence: %d", current_head); + + for (int i = 0; i < num_requests; i++) { + reorder_requests(requests, num_requests, current_head); + + // Find the nearest unserviced request + for (int j = 0; j < num_requests; j++) { + if (!requests[j].is_serviced) { + int seek_time = abs_diff(current_head, requests[j].track); + total_seek_time += seek_time; + current_head = requests[j].track; + requests[j].is_serviced = 1; + + printf(" -> %d", current_head); + break; + } + } + } + + printf("\nTotal Seek Time: %d\n", total_seek_time); + printf("Average Seek Time: %.2f\n", (float)total_seek_time / num_requests); +} + +int main() { + int num_requests, initial_head; + + printf("Enter number of disk requests: "); + scanf("%d", &num_requests); + + DiskRequest requests[MAX_REQUESTS]; + printf("Enter track numbers for the requests:\n"); + for (int i = 0; i < num_requests; i++) { + printf("Request %d: ", i + 1); + scanf("%d", &requests[i].track); + requests[i].is_serviced = 0; + } + + printf("Enter initial head position: "); + scanf("%d", &initial_head); + + adaptive_disk_reordering(requests, num_requests, initial_head); + + return 0; +} diff --git a/Miscellaneous Algorithms/Adaptive Disk Reordering Algorithm/Readme.md b/Miscellaneous Algorithms/Adaptive Disk Reordering Algorithm/Readme.md new file mode 100644 index 00000000..9529335a --- /dev/null +++ b/Miscellaneous Algorithms/Adaptive Disk Reordering Algorithm/Readme.md @@ -0,0 +1,61 @@ +# Adaptive Disk Reordering (ADR) Algorithm + +This project provides a C implementation of the Adaptive Disk Reordering (ADR) algorithm, which dynamically adjusts the order of disk I/O requests to optimize disk access times. By prioritizing requests based on their proximity to the current disk head position, this algorithm minimizes seek time, making it ideal for applications requiring efficient disk scheduling. 
+ +## Table of Contents +- [Overview](#overview) +- [Features](#features) +- [Algorithm Explanation](#algorithm-explanation) +- [Input and Output](#input-and-output) +- [Code Structure](#code-structure) +- [Example](#example) + +## Overview +The Adaptive Disk Reordering (ADR) algorithm reduces disk seek time by dynamically reordering I/O requests based on the disk head's current position. It enhances performance in environments with frequent disk access patterns by minimizing the back-and-forth movement of the disk arm, thus lowering the overall seek time and improving response time. + +## Features +- **Dynamic Request Reordering**: Orders pending I/O requests based on current disk head position to reduce seek time. +- **Seek Time Optimization**: Calculates total and average seek time, providing metrics to evaluate performance. +- **Efficient Disk Head Movement**: Minimizes disk head movement by servicing the nearest unserviced requests first. +- **Simple and Adaptable**: Easy to integrate into systems where frequent disk I/O occurs. + +## Algorithm Explanation +The ADR algorithm follows these steps: +1. **Current Head Position Check**: Starts by checking the current head position and orders requests based on their proximity to this position. +2. **Request Sorting**: Requests are sorted dynamically during each iteration, ensuring that the closest unserviced request is serviced next. +3. **Service Requests**: + - Services requests sequentially, moving the disk head to the nearest unserviced request and updating the seek time. +4. **Seek Time Calculation**: After all requests are serviced, the algorithm outputs the total and average seek times for evaluation. + +## Input and Output + +### Input +- **Number of Requests**: The total number of track requests (integer). +- **Track Requests**: An array containing track numbers of each request. +- **Initial Head Position**: The starting position of the disk head (integer). + +### Output +- **Seek Sequence**: The order in which tracks are accessed by the disk head. +- **Total Seek Time**: The cumulative distance the disk head traveled to service all requests. +- **Average Seek Time**: Average distance traveled per request. + +## Code Structure +The code for the ADR algorithm is organized as follows: +. 
+├── ADR.c # Main C file with the ADR implementation +└── Readme.md # Project documentation + +### Example + +Enter number of disk requests: 5 +Enter track numbers for the requests: +Request 1: 55 +Request 2: 14 +Request 3: 37 +Request 4: 98 +Request 5: 25 +Enter initial head position: 50 + +Seek Sequence: 50 -> 55 -> 37 -> 25 -> 14 -> 98 +Total Seek Time: 96 +Average Seek Time: 19.20 diff --git a/Miscellaneous Algorithms/Multi-Tiered Caching Algorithm/MTC.c b/Miscellaneous Algorithms/Multi-Tiered Caching Algorithm/MTC.c new file mode 100644 index 00000000..d8f6e9bd --- /dev/null +++ b/Miscellaneous Algorithms/Multi-Tiered Caching Algorithm/MTC.c @@ -0,0 +1,91 @@ +#include +#include + +#define L1_CACHE_SIZE 3 +#define L2_CACHE_SIZE 5 + +typedef struct Cache { + int *data; + int size; + int count; +} Cache; + +// Initialize cache +Cache *initialize_cache(int size) { + Cache *cache = (Cache *)malloc(sizeof(Cache)); + cache->data = (int *)malloc(size * sizeof(int)); + cache->size = size; + cache->count = 0; + return cache; +} + +// Check if a value exists in cache and return its position +int find_in_cache(Cache *cache, int value) { + for (int i = 0; i < cache->count; i++) { + if (cache->data[i] == value) { + return i; + } + } + return -1; +} + +// Add value to cache with FIFO replacement +void add_to_cache(Cache *cache, int value) { + if (cache->count < cache->size) { + cache->data[cache->count++] = value; + } else { + // Shift data and add new value at the end + for (int i = 1; i < cache->size; i++) { + cache->data[i - 1] = cache->data[i]; + } + cache->data[cache->size - 1] = value; + } +} + +// Multi-tiered caching function +void multi_tiered_cache(Cache *L1, Cache *L2, int value) { + int pos_in_L1 = find_in_cache(L1, value); + int pos_in_L2 = find_in_cache(L2, value); + + if (pos_in_L1 != -1) { + printf("Value %d found in L1 cache.\n", value); + } else if (pos_in_L2 != -1) { + printf("Value %d found in L2 cache. Moving to L1.\n", value); + // Move from L2 to L1 cache + add_to_cache(L1, value); + // Remove from L2 (by shifting) + for (int i = pos_in_L2; i < L2->count - 1; i++) { + L2->data[i] = L2->data[i + 1]; + } + L2->count--; + } else { + printf("Value %d not found in L1 or L2. Adding to L1 and L2.\n", value); + add_to_cache(L1, value); + add_to_cache(L2, value); + } +} + +// Free allocated memory for cache +void free_cache(Cache *cache) { + free(cache->data); + free(cache); +} + +// Main function to test multi-tiered caching +int main() { + Cache *L1 = initialize_cache(L1_CACHE_SIZE); + Cache *L2 = initialize_cache(L2_CACHE_SIZE); + + int requests[] = {10, 20, 10, 30, 40, 50, 20, 60, 70, 10}; + int num_requests = sizeof(requests) / sizeof(requests[0]); + + for (int i = 0; i < num_requests; i++) { + multi_tiered_cache(L1, L2, requests[i]); + } + + // Free memory + free_cache(L1); + free_cache(L2); + + return 0; +} diff --git a/Miscellaneous Algorithms/Multi-Tiered Caching Algorithm/Readme.md b/Miscellaneous Algorithms/Multi-Tiered Caching Algorithm/Readme.md new file mode 100644 index 00000000..b011c181 --- /dev/null +++ b/Miscellaneous Algorithms/Multi-Tiered Caching Algorithm/Readme.md @@ -0,0 +1,61 @@ +# Multi-Tiered Caching (MTC) Algorithm + +This project implements a Multi-Tiered Caching (MTC) algorithm in C. The MTC algorithm manages multiple cache levels to improve data retrieval efficiency by storing frequently accessed data in faster, higher-priority caches. 
It dynamically moves data between cache levels based on access patterns, reducing retrieval time and optimizing memory utilization in systems with large data workloads.
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Features](#features)
+- [Algorithm Explanation](#algorithm-explanation)
+- [Input and Output](#input-and-output)
+
+
+## Overview
+
+The Multi-Tiered Caching (MTC) algorithm uses multiple cache levels (e.g., L1, L2) to store frequently accessed data closer to the processor, reducing data retrieval time. This approach is particularly useful for systems with limited memory and a high volume of data requests, as it minimizes access time and improves memory management.
+
+## Features
+
+- Multi-tiered caching system with multiple cache levels (e.g., L1 and L2).
+- Caching based on access frequency, moving data between levels as needed.
+- Simple FIFO (First-In-First-Out) replacement policy in each cache tier.
+- Efficient data access management for large datasets and high-throughput applications.
+
+## Algorithm Explanation
+
+1. **Cache Initialization**: Allocate memory for each cache level with a predefined size (L1 and L2).
+2. **Data Lookup**:
+   - Check if the data exists in the higher-priority cache (L1).
+   - If not in L1, search the lower-priority cache (L2).
+3. **Data Movement**:
+   - If found in L2, move the data to L1 for quicker access in future requests.
+   - If not found in either cache, add it to both L1 and L2 caches.
+4. **Replacement Policy**: Uses a First-In-First-Out (FIFO) approach for data replacement, removing the oldest entry when the cache is full.
+
+## Input and Output
+
+### Input
+
+- **Data Requests**: An array of integers representing the data access requests.
+- **L1 and L2 Cache Sizes**: Fixed cache sizes for each level (e.g., L1 with 3 slots, L2 with 5 slots).
+
+### Output
+
+The program will output the following for each request:
+- Whether the requested data was found in L1, L2, or was not found.
+- Any movement between cache levels when data is accessed.
+
+#### Example Input
+Requests: {10, 20, 10, 30, 40, 50, 20, 60, 70, 10}
+L1 Cache Size: 3
+L2 Cache Size: 5
+
+#### Example Output
+Value 10 not found in L1 or L2. Adding to L1 and L2.
+Value 20 not found in L1 or L2. Adding to L1 and L2.
+Value 10 found in L1 cache.
+Value 30 not found in L1 or L2. Adding to L1 and L2.
+Value 40 not found in L1 or L2. Adding to L1 and L2.
+Value 50 not found in L1 or L2. Adding to L1 and L2.
+Value 20 found in L2 cache. Moving to L1.
+Value 60 not found in L1 or L2. Adding to L1 and L2.
+Value 70 not found in L1 or L2. Adding to L1 and L2.
+Value 10 not found in L1 or L2. Adding to L1 and L2.
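+
+To try the example above, the program can be compiled and run directly, assuming GCC is available (the binary name `mtc` is only illustrative):
+
+```bash
+gcc MTC.c -o mtc
+./mtc
+```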
diff --git a/Miscellaneous Algorithms/Power-Aware Disk Scheduling algorithm/PADS.c b/Miscellaneous Algorithms/Power-Aware Disk Scheduling algorithm/PADS.c new file mode 100644 index 00000000..6214cbff --- /dev/null +++ b/Miscellaneous Algorithms/Power-Aware Disk Scheduling algorithm/PADS.c @@ -0,0 +1,129 @@ +#include +#include +#include + +#define MAX_REQUESTS 100 +#define DISK_SIZE 200 // Represents the number of tracks on the disk +#define LOW_POWER_THRESHOLD 20 // Threshold for "low-power" mode + +// Function to sort requests +void sort_requests(int requests[], int n) { + for (int i = 0; i < n - 1; i++) { + for (int j = i + 1; j < n; j++) { + if (requests[i] > requests[j]) { + int temp = requests[i]; + requests[i] = requests[j]; + requests[j] = temp; + } + } + } +} + +int power_aware_scan(int requests[], int n, int head, int direction) { + int seek_time = 0; + int total_seek_time = 0; + int distance; + int current_head = head; + bool low_power_mode = false; + + // Sort requests for efficient scan-based scheduling + sort_requests(requests, n); + + // Separate requests to the left and right of the head + int left[MAX_REQUESTS], right[MAX_REQUESTS]; + int left_count = 0, right_count = 0; + + for (int i = 0; i < n; i++) { + if (requests[i] < head) { + left[left_count++] = requests[i]; + } else { + right[right_count++] = requests[i]; + } + } + + // Implement the SCAN algorithm with power-awareness + if (direction == 1) { // Moving towards higher tracks + for (int i = 0; i < right_count; i++) { + distance = abs(current_head - right[i]); + if (distance > LOW_POWER_THRESHOLD) { + printf("Entering low-power mode to save energy.\n"); + low_power_mode = true; + } else { + printf("Servicing request at track %d\n", right[i]); + seek_time = distance; + current_head = right[i]; + total_seek_time += seek_time; + low_power_mode = false; + } + } + // Reverse direction after reaching the highest track + for (int i = left_count - 1; i >= 0; i--) { + distance = abs(current_head - left[i]); + if (distance > LOW_POWER_THRESHOLD) { + printf("Entering low-power mode to save energy.\n"); + low_power_mode = true; + } else { + printf("Servicing request at track %d\n", left[i]); + seek_time = distance; + current_head = left[i]; + total_seek_time += seek_time; + low_power_mode = false; + } + } + } else { // Moving towards lower tracks + for (int i = left_count - 1; i >= 0; i--) { + distance = abs(current_head - left[i]); + if (distance > LOW_POWER_THRESHOLD) { + printf("Entering low-power mode to save energy.\n"); + low_power_mode = true; + } else { + printf("Servicing request at track %d\n", left[i]); + seek_time = distance; + current_head = left[i]; + total_seek_time += seek_time; + low_power_mode = false; + } + } + // Reverse direction after reaching the lowest track + for (int i = 0; i < right_count; i++) { + distance = abs(current_head - right[i]); + if (distance > LOW_POWER_THRESHOLD) { + printf("Entering low-power mode to save energy.\n"); + low_power_mode = true; + } else { + printf("Servicing request at track %d\n", right[i]); + seek_time = distance; + current_head = right[i]; + total_seek_time += seek_time; + low_power_mode = false; + } + } + } + + return total_seek_time; +} + +int main() { + int n, head, direction; + int requests[MAX_REQUESTS]; + + // Get input for requests + printf("Enter the number of disk requests: "); + scanf("%d", &n); + printf("Enter the disk requests: "); + for (int i = 0; i < n; i++) { + scanf("%d", &requests[i]); + } + printf("Enter the initial position of the disk head: "); + 
scanf("%d", &head); + printf("Enter the initial direction (1 for high, 0 for low): "); + scanf("%d", &direction); + + // Run the power-aware scan algorithm + int total_seek_time = power_aware_scan(requests, n, head, direction); + + printf("Total seek time: %d\n", total_seek_time); + printf("Average seek time: %.2f\n", (float)total_seek_time / n); + + return 0; +} diff --git a/Miscellaneous Algorithms/Power-Aware Disk Scheduling algorithm/Readme.md b/Miscellaneous Algorithms/Power-Aware Disk Scheduling algorithm/Readme.md new file mode 100644 index 00000000..7714a6ec --- /dev/null +++ b/Miscellaneous Algorithms/Power-Aware Disk Scheduling algorithm/Readme.md @@ -0,0 +1,73 @@ +# Power-Aware Disk Scheduling (PADS) Algorithm + +This project implements the Power-Aware Disk Scheduling (PADS) algorithm in C, a modified disk scheduling technique designed to reduce power consumption during disk I/O operations. The PADS algorithm optimizes disk head movement by balancing request servicing with energy-saving mechanisms, simulating a "low-power" mode when disk head movement would be excessive. + +## Table of Contents + +- [Overview](#overview) +- [Features](#features) +- [Algorithm Explanation](#algorithm-explanation) +- [Input and Output](#input-and-output) +- [Code Structure](#code-structure) +- [Example](#example) + +## Overview + +The Power-Aware Disk Scheduling (PADS) algorithm aims to enhance energy efficiency in systems by reducing unnecessary disk head movements. By implementing a modified SCAN (Elevator) scheduling algorithm, PADS can selectively enter a "low-power" mode if the next request is far from the current disk head position. This reduces power consumption while still achieving acceptable seek times and balancing I/O performance. + +## Features + +- **Efficient Disk I/O**: Services requests in a sequence to minimize disk head movement. +- **Power-Aware Mode**: Enters "low-power" mode when the disk head movement exceeds a specified threshold. +- **Flexible Direction Control**: Allows initial direction specification (toward higher or lower track numbers). +- **Seek Time Calculation**: Outputs total and average seek time for performance analysis. + +## Algorithm Explanation + +1. **Initial Direction**: The disk head starts moving in a specified direction (either toward higher or lower track numbers). +2. **Sorting Requests**: Requests are sorted to prioritize servicing those in the head's current path, ensuring minimal head movement. +3. **Low-Power Mode Activation**: If the distance to the next request exceeds a defined threshold, the algorithm simulates a "low-power" mode, reducing head movement until closer requests are available. +4. **Seek Time Calculation**: The algorithm calculates the total distance the head moved to service all requests, helping evaluate performance. + +## Input and Output + +### Input + +- **Number of Requests**: Total number of track requests. +- **Track Requests**: Array of requested track positions. +- **Initial Head Position**: Starting position of the disk head. +- **Disk Size**: Total size of the disk (maximum track number). +- **Initial Direction**: Starting direction of the head (1 for high, 0 for low). + +### Output + +- **Seek Sequence**: The sequence of tracks serviced by the disk head. +- **Total Seek Time**: Total distance the disk head moved. +- **Average Seek Time**: Average seek time per request, useful for performance evaluation. 
+ +## Code Structure + +├── pads_algorithm.c # Contains the PADS algorithm implementation +├── README.md # Project documentation +└── LICENSE # License information + +## Example + +### Input: + +Enter the number of disk requests: 5 +Enter the disk requests: 45 130 10 180 90 +Enter the initial position of the disk head: 50 +Enter the initial direction (1 for high, 0 for low): 1 + +### Output: + +Servicing request at track 90 +Servicing request at track 130 +Servicing request at track 180 +Entering low-power mode to save energy. +Servicing request at track 45 +Servicing request at track 10 + +Total seek time: 250 +Average seek time: 50.00 diff --git a/Miscellaneous Algorithms/Predictive Scheduling/Readme.md b/Miscellaneous Algorithms/Predictive Scheduling/Readme.md new file mode 100644 index 00000000..09de3354 --- /dev/null +++ b/Miscellaneous Algorithms/Predictive Scheduling/Readme.md @@ -0,0 +1,85 @@ + + +# Predictive Scheduling in C + +## Overview + +This project implements a predictive scheduling algorithm using the Shortest Job First (SJF) method. The goal of predictive scheduling is to minimize the overall wait time by predicting each process's burst time and scheduling processes accordingly. The prediction model here uses exponential averaging, which calculates the predicted burst time based on previous burst times. + +## Features + +- Predicts the next burst time for each process using exponential averaging. +- Sorts processes based on their predicted burst times to simulate a "Shortest Predicted Job First" (SPJF) scheduling strategy. +- Adjustable smoothing factor (`alpha`) for predictions, making the model adaptable to different levels of variation in process burst times. + +## Getting Started + +### Prerequisites +- A C compiler (e.g., GCC) + +### Compilation +To compile the program, run: +```bash +gcc predictive_scheduling.c -o predictive_scheduling +``` + +### Usage +To run the program: +```bash +./predictive_scheduling +``` + +You will be prompted to: +1. Enter the number of processes. +2. Enter each process’s burst time. + +### Example +``` +Enter number of processes: 3 +Enter burst time for process 1: 6 +Enter burst time for process 2: 2 +Enter burst time for process 3: 8 + +Scheduled Processes (in order of predicted burst time): +PID Burst Time Predicted Burst Time +2 2 2 +1 6 4 +3 8 5 +``` + +## Code Explanation + +### Key Components + +1. **Predictive Burst Time Calculation**: Uses exponential averaging to predict each process's next burst time. +2. **Sorting by Predicted Burst Time**: Processes are sorted in ascending order of their predicted burst times to simulate a predictive scheduling strategy. + +### Parameters + +- `alpha`: Smoothing factor used for exponential averaging. A value of 0.5 is commonly used, but you can adjust it to make the prediction more or less sensitive to recent burst times. + +## Complexity Analysis + +### Time Complexity + +1. **Predictive Burst Time Calculation**: This step is `O(n)`, where `n` is the number of processes. For each process, we calculate the predicted burst based on the previous burst time. + +2. **Sorting Processes by Predicted Burst Time**: The sorting step has a time complexity of `O(n^2)` due to the bubble sort used here. This can be optimized to `O(n log n)` if we replace it with a more efficient sorting algorithm like quicksort or mergesort. + + **Overall Time Complexity**: `O(n^2)` (with bubble sort), but it can be improved to `O(n log n)` with a more efficient sorting algorithm. + +### Space Complexity + +1. 
**Process Storage**: We store `n` processes, each with three attributes (`pid`, `burstTime`, and `predictedBurst`), resulting in a space complexity of `O(n)`. + +2. **Auxiliary Space**: Sorting does not require additional memory for bubble sort; hence, auxiliary space complexity is `O(1)`. + + **Overall Space Complexity**: `O(n)` + +## Improvements + +- **Sorting Optimization**: Replace bubble sort with quicksort or mergesort to improve time complexity. +- **Dynamic Prediction Models**: Implement other predictive models like linear regression for more accurate burst time predictions based on larger historical data. + + + diff --git a/Miscellaneous Algorithms/Predictive Scheduling/program.c b/Miscellaneous Algorithms/Predictive Scheduling/program.c new file mode 100644 index 00000000..7c472bad --- /dev/null +++ b/Miscellaneous Algorithms/Predictive Scheduling/program.c @@ -0,0 +1,63 @@ +#include + +#define MAX_PROCESSES 10 + +struct Process { + int pid; + int burstTime; + int predictedBurst; +}; + +// Function to calculate predicted burst time using exponential averaging +int predictBurstTime(int previousBurst, int prevPredicted, float alpha) { + return (int)(alpha * previousBurst + (1 - alpha) * prevPredicted); +} + +void sortByPredictedBurst(struct Process processes[], int n) { + struct Process temp; + for (int i = 0; i < n - 1; i++) { + for (int j = i + 1; j < n; j++) { + if (processes[i].predictedBurst > processes[j].predictedBurst) { + temp = processes[i]; + processes[i] = processes[j]; + processes[j] = temp; + } + } + } +} + +int main() { + int n; + float alpha = 0.5; // Smoothing factor for prediction (0 < alpha < 1) + + printf("Enter number of processes: "); + scanf("%d", &n); + + struct Process processes[MAX_PROCESSES]; + + // Input burst times for each process + for (int i = 0; i < n; i++) { + processes[i].pid = i + 1; + printf("Enter burst time for process %d: ", processes[i].pid); + scanf("%d", &processes[i].burstTime); + + // Initial prediction, assume it’s the same as the first burst time + processes[i].predictedBurst = processes[i].burstTime; + } + + // Predict burst time for each process (starting from the second burst) + for (int i = 1; i < n; i++) { + processes[i].predictedBurst = predictBurstTime(processes[i - 1].burstTime, processes[i - 1].predictedBurst, alpha); + } + + // Sort processes by predicted burst time + sortByPredictedBurst(processes, n); + + printf("\nScheduled Processes (in order of predicted burst time):\n"); + printf("PID\tBurst Time\tPredicted Burst Time\n"); + for (int i = 0; i < n; i++) { + printf("%d\t%d\t\t%d\n", processes[i].pid, processes[i].burstTime, processes[i].predictedBurst); + } + + return 0; +} diff --git a/Miscellaneous Algorithms/Priority Aging Round Robin (PARR)/Readme.md b/Miscellaneous Algorithms/Priority Aging Round Robin (PARR)/Readme.md new file mode 100644 index 00000000..ab1222a2 --- /dev/null +++ b/Miscellaneous Algorithms/Priority Aging Round Robin (PARR)/Readme.md @@ -0,0 +1,102 @@ +Overview +The Priority Aging Round Robin (PARR) algorithm is an enhanced version of the Round Robin (RR) scheduling algorithm, which incorporates priority aging to ensure that processes with lower priority eventually get executed. This algorithm is designed to combine the fairness of Round Robin with the flexibility of Priority Scheduling, preventing starvation for lower-priority processes by "aging" their priorities over time. + +Round Robin (RR): Assigns a fixed time slice (or quantum) to each process in a cyclic order. 
It's known for its simplicity and fairness, but it does not consider process priority, which can lead to low-priority tasks being starved if high-priority tasks keep getting executed.
+
+Priority Aging: A technique where the priority of a process gradually increases the longer it waits. This ensures that low-priority processes are eventually scheduled for execution.
+
+How the Algorithm Works
+Initialization:
+
+Each process is assigned an initial priority.
+A time quantum is set (for example, 10 ms).
+Round Robin Scheduling:
+
+The processes are placed in a queue.
+In each cycle, a process is assigned a time slice (time quantum) to execute.
+After each quantum, the process is moved to the back of the queue if it hasn't finished.
+Priority Aging:
+
+The priority of each process increases gradually after every cycle it waits (i.e., each time it is not executed).
+The priority increment ensures that lower-priority processes get scheduled sooner as their priority increases over time.
+Execution:
+
+The process with the highest priority is given the CPU first. If two processes have the same priority, Round Robin scheduling is used to determine which process gets executed.
+Once a process completes its quantum or finishes execution, it is removed from the queue.
+Reordering:
+
+After each cycle, the process queue is reordered based on the current priorities of the processes. This ensures that processes with higher priority are executed first.
+Theory Behind PARR
+Fairness: Like Round Robin, PARR ensures that every process gets a chance to execute for a fixed time slice, thereby preventing any one process from monopolizing the CPU.
+Starvation Prevention: The priority aging mechanism prevents starvation by gradually increasing the priority of waiting processes, ensuring that lower-priority processes get executed eventually.
+Dynamic Prioritization: PARR dynamically adjusts priorities, allowing the system to adapt to changing workload conditions by favoring processes that have been waiting longer or are in danger of starvation.
+Time and Space Complexity
+Time Complexity:
+Round Robin Scheduling: Each process is executed in a cyclic manner, and a time slice is allocated to each process. Thus, for n processes, it will take O(n) time to complete one cycle, assuming the time quantum is constant.
+
+Priority Aging: The aging process involves updating the priority of each process every time it doesn't execute. The time complexity for priority aging in each cycle is O(n) as it requires a check and update of the priority for each process.
+
+Reordering the Queue: After each quantum, the process queue may need to be reordered based on the updated priorities. This can be done in O(n log n) time using a sorting algorithm like QuickSort or MergeSort.
+
+Overall Time Complexity: In the worst case, the overall time complexity is O(n log n) due to the need to sort the processes by priority after each cycle.
+
+Space Complexity:
+The space complexity is O(n) because the algorithm uses a queue to store n processes and their associated data (including priorities and time slices). The size of the queue remains proportional to the number of processes.
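+
+The "Reordering" step described above can be implemented as a sort of the process array by current priority before each round. A minimal sketch follows; the `Process` fields mirror the structure used in pragram.c, but the sorting helper itself is illustrative and not part of the original file:
+
+```c
+typedef struct {
+    int id;              // Process ID
+    int priority;        // Current (aged) priority; a higher value runs earlier
+    int remaining_time;  // Remaining execution time
+} Process;
+
+// Reorder the queue so that higher-priority processes are considered first.
+// A plain insertion sort is enough for small process counts; an O(n log n)
+// sort could be substituted, as discussed in the complexity section.
+void reorderByPriority(Process processes[], int n) {
+    for (int i = 1; i < n; i++) {
+        Process key = processes[i];
+        int j = i - 1;
+        while (j >= 0 && processes[j].priority < key.priority) {
+            processes[j + 1] = processes[j];
+            j--;
+        }
+        processes[j + 1] = key;
+    }
+}
+```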
+ +Key Components in the Code: +Process Structure: Each process has an id, priority, and remaining_time. +Priority Aging: The priorityAging function increases the priority of processes that have not completed execution. +Round Robin Execution: The roundRobin function simulates the Round Robin scheduling, applying the time quantum and updating the process states after each cycle. +Advantages of PARR: +Fairness: All processes are given a fair share of the CPU through Round Robin scheduling. +Prevents Starvation: The priority aging ensures that even lower-priority processes eventually get executed, preventing starvation. +Adaptability: By dynamically adjusting priorities, PARR adapts to changing system conditions and workload distributions. +Disadvantages of PARR: +Overhead: Priority aging and reordering the queue introduce some overhead, which might reduce the system's overall efficiency. +Complexity: Compared to simple Round Robin, PARR adds complexity due to priority management and dynamic adjustments. diff --git a/Miscellaneous Algorithms/Priority Aging Round Robin (PARR)/pragram.c b/Miscellaneous Algorithms/Priority Aging Round Robin (PARR)/pragram.c new file mode 100644 index 00000000..dbd9ab03 --- /dev/null +++ b/Miscellaneous Algorithms/Priority Aging Round Robin (PARR)/pragram.c @@ -0,0 +1,66 @@ +#include +#include + +#define MAX_PROCESSES 10 +#define TIME_QUANTUM 10 + +// Structure to represent a process +typedef struct { + int id; // Process ID + int priority; // Process priority + int remaining_time; // Remaining time to complete execution +} Process; + +// Function to apply priority aging +void priorityAging(Process processes[], int n) { + for (int i = 0; i < n; i++) { + if (processes[i].remaining_time > 0) { + processes[i].priority++; // Increase priority for waiting processes + } + } +} + +// Function to perform Round Robin scheduling with priority aging +void roundRobin(Process processes[], int n) { + int completed = 0; + + // Execute until all processes are completed + while (completed < n) { + for (int i = 0; i < n; i++) { + if (processes[i].remaining_time > 0) { + // Execute the process for TIME_QUANTUM or remaining time + printf("Executing process %d with priority %d\n", processes[i].id, processes[i].priority); + + int execution_time = (processes[i].remaining_time <= TIME_QUANTUM) ? 
processes[i].remaining_time : TIME_QUANTUM; + processes[i].remaining_time -= execution_time; + + // Check if the process is completed + if (processes[i].remaining_time == 0) { + printf("Process %d completed.\n", processes[i].id); + completed++; + } + } + } + + // Apply priority aging after each round + priorityAging(processes, n); + } +} + +// Main function to execute the PARR algorithm +int main() { + // Define processes with their IDs, initial priorities, and remaining times + Process processes[MAX_PROCESSES] = { + {1, 1, 50}, + {2, 2, 30}, + {3, 3, 40}, + {4, 1, 20} + }; + + int n = 4; // Number of processes + + // Execute Round Robin scheduling with priority aging + roundRobin(processes, n); + + return 0; +} diff --git a/Miscellaneous Algorithms/proportional scheduling/program.c b/Miscellaneous Algorithms/proportional scheduling/program.c new file mode 100644 index 00000000..5f36a709 --- /dev/null +++ b/Miscellaneous Algorithms/proportional scheduling/program.c @@ -0,0 +1,66 @@ +#include +#include +#include + +#define NUM_PROCESSES 5 + +// Structure for process details +struct Process { + int pid; // Process ID + int burstTime; // Burst time of the process + int group; // Group ID (for fair share) +}; + +// Function to simulate fair share scheduling +void fairShareScheduling(struct Process processes[], int numProcesses) { + int timeQuantum = 2; // Time quantum for each round + int timeElapsed = 0; + + // Keep track of remaining burst times + int remainingBurst[numProcesses]; + for (int i = 0; i < numProcesses; i++) { + remainingBurst[i] = processes[i].burstTime; + } + + printf("Starting Fair Share Scheduling...\n"); + + // Continue until all processes are done + while (1) { + int allDone = 1; + + for (int i = 0; i < numProcesses; i++) { + if (remainingBurst[i] > 0) { + allDone = 0; + + // Execute for time quantum or remaining burst time + int execTime = (remainingBurst[i] > timeQuantum) ? timeQuantum : remainingBurst[i]; + remainingBurst[i] -= execTime; + timeElapsed += execTime; + + printf("Process %d (Group %d) ran for %d units\n", processes[i].pid, processes[i].group, execTime); + + // If process finished + if (remainingBurst[i] == 0) { + printf("Process %d completed at time %d\n", processes[i].pid, timeElapsed); + } + } + } + + // Check if all processes are done + if (allDone) break; + } +} + +int main() { + struct Process processes[NUM_PROCESSES] = { + {1, 8, 1}, // Process ID, Burst Time, Group + {2, 4, 2}, + {3, 9, 1}, + {4, 5, 2}, + {5, 7, 1} + }; + + fairShareScheduling(processes, NUM_PROCESSES); + + return 0; +} diff --git a/Miscellaneous Algorithms/proportional scheduling/readme.md b/Miscellaneous Algorithms/proportional scheduling/readme.md new file mode 100644 index 00000000..25b4598c --- /dev/null +++ b/Miscellaneous Algorithms/proportional scheduling/readme.md @@ -0,0 +1,39 @@ +fair share is another name of proportional scheduling.. +Fair Share Scheduling is a scheduling strategy that distributes CPU time fairly among users or groups of users. Instead of focusing solely on individual processes, fair share scheduling ensures that each user or group gets a specified proportion of CPU time, helping prevent any single user or group from monopolizing resources. + +Description +In a fair share scheduling system, the CPU is allocated based on user-defined shares or groups. Each group is given an equal or specified share of CPU resources. 
For example, if two users each have processes that need CPU time, the scheduler will ensure both users receive a fair amount of CPU time, regardless of how many processes each user has running. + +The scheduler operates in rounds (usually called time slices or time quanta) and allocates CPU time to processes within each user group. If a user has more processes than another user, the time is divided among that user's processes accordingly. This way, fair share scheduling attempts to prevent cases where one user's processes consume an excessive amount of CPU time, starving other users. + +Advantages (Pros) +Fairness Across Users or Groups: Fair share scheduling ensures that each user or group receives an equitable share of CPU time, promoting fairness in resource allocation. + +Prevents Starvation: By ensuring each group gets a proportion of CPU time, this method prevents any user or process from monopolizing the CPU, reducing the chance of starvation for low-priority users or processes. + +Customizable Resource Distribution: It can be configured to assign specific shares based on group importance, enabling priority allocation to certain users or critical processes. + +Enhanced Multitasking: By distributing CPU time fairly, it improves the responsiveness of the system across multiple users and processes, which is beneficial for environments with diverse workloads. + +Disadvantages (Cons) +Increased Complexity: Fair share scheduling can be more complex to implement compared to simpler algorithms like round-robin or first-come-first-served, as it requires managing and tracking groups and their allocated shares. + +Overhead in Resource Tracking: The system must monitor the CPU time used by each group, adding overhead to maintain this information, which can slightly reduce efficiency. + +Not Optimized for Real-Time Tasks: Fair share scheduling does not prioritize time-sensitive tasks, potentially leading to delays for high-priority processes if they are part of a lower-priority group. + +Suboptimal Performance for Single-User Systems: In environments with only one user or where most resources are used by a single user, fair share scheduling may be unnecessary and could add unnecessary complexity. + +Use Cases +Fair share scheduling is ideal in multi-user or multi-group environments such as: + +Academic or Research Institutions: Where multiple researchers or students share computational resources. +Enterprise Environments: Where resources need to be equitably divided among departments or teams. +Shared Server Systems: Cloud environments or shared servers where multiple users or clients access limited computational resources. +Overall, fair share scheduling balances CPU usage among users or groups, making it well-suited for multi-user systems, but it may add complexity and be less efficient in simpler or single-user systems. 
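+
+As a concrete illustration of "specified shares", the time slice a group receives in each round can be derived from its weight relative to the total. This is only a sketch of the idea (the weights, helper name, and round length are illustrative; the accompanying program.c uses a fixed quantum for every process instead):
+
+```c
+// Compute the CPU time a group receives in one scheduling round,
+// proportional to its share of the total weight.
+int group_quantum(int group_weight, int total_weight, int round_length) {
+    // e.g. weight 2 out of a total of 5 with a 10-unit round -> 4 units
+    return (round_length * group_weight) / total_weight;
+}
+```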
+ + + + + + diff --git a/Miscellaneous Algorithms/two level scheduling/program.c b/Miscellaneous Algorithms/two level scheduling/program.c new file mode 100644 index 00000000..ea294710 --- /dev/null +++ b/Miscellaneous Algorithms/two level scheduling/program.c @@ -0,0 +1,64 @@ +#include +#include + +struct Process { + int pid; + int arrival_time; + int burst_time; + int waiting_time; + int turnaround_time; +}; + +void calculateTimes(struct Process *processes, int n) { + int total_waiting = 0, total_turnaround = 0; + processes[0].waiting_time = 0; + processes[0].turnaround_time = processes[0].burst_time; + + for (int i = 1; i < n; i++) { + processes[i].waiting_time = processes[i - 1].waiting_time + processes[i - 1].burst_time; + processes[i].turnaround_time = processes[i].waiting_time + processes[i].burst_time; + + total_waiting += processes[i].waiting_time; + total_turnaround += processes[i].turnaround_time; + } + + printf("\nProcess ID\tArrival Time\tBurst Time\tWaiting Time\tTurnaround Time\n"); + for (int i = 0; i < n; i++) { + printf("%d\t\t%d\t\t%d\t\t%d\t\t%d\n", processes[i].pid, processes[i].arrival_time, + processes[i].burst_time, processes[i].waiting_time, processes[i].turnaround_time); + } + printf("\nAverage Waiting Time: %.2f", (float)total_waiting / n); + printf("\nAverage Turnaround Time: %.2f\n", (float)total_turnaround / n); +} + +void twoLevelScheduling(struct Process *processes, int n) { + // Step 1: Sort processes by arrival time (Long-term scheduling) + for (int i = 0; i < n - 1; i++) { + for (int j = i + 1; j < n; j++) { + if (processes[i].arrival_time > processes[j].arrival_time) { + struct Process temp = processes[i]; + processes[i] = processes[j]; + processes[j] = temp; + } + } + } + + // Step 2: FCFS on sorted list (Short-term scheduling) + calculateTimes(processes, n); +} + +int main() { + int n; + printf("Enter the number of processes: "); + scanf("%d", &n); + + struct Process processes[n]; + for (int i = 0; i < n; i++) { + printf("Enter arrival time and burst time for process %d: ", i + 1); + processes[i].pid = i + 1; + scanf("%d %d", &processes[i].arrival_time, &processes[i].burst_time); + } + + twoLevelScheduling(processes, n); + return 0; +} diff --git a/Miscellaneous Algorithms/two level scheduling/readme.md b/Miscellaneous Algorithms/two level scheduling/readme.md new file mode 100644 index 00000000..e4870a7a --- /dev/null +++ b/Miscellaneous Algorithms/two level scheduling/readme.md @@ -0,0 +1,43 @@ +Two-level scheduling is a strategy used in operating systems to manage processes efficiently. It divides the scheduling process into two distinct phases to balance resource usage and system responsiveness, particularly in systems with a mix of interactive and batch processes. + +### Description of Two-Level Scheduling + +1. **First Level (Long-Term Scheduling):** + - The long-term scheduler, also called the admission scheduler, determines which processes are admitted to the system for execution. + - It decides the overall mix of active processes in the system by selecting a subset from a larger pool of tasks. Only a portion of processes are allowed into memory based on system resource availability. + - Long-term scheduling helps control the degree of multiprogramming (i.e., the number of concurrent processes in memory). + +2. **Second Level (Short-Term Scheduling):** + - The short-term scheduler, or CPU scheduler, operates on the processes that are already in memory. 
+ - It frequently selects one of these processes to execute on the CPU, switching between processes as needed to optimize CPU utilization and system responsiveness. + - Short-term scheduling uses various algorithms (e.g., round-robin, priority scheduling) to decide which process gets CPU time next. + +### Pros of Two-Level Scheduling + +1. **Improved Resource Utilization:** + - Long-term scheduling controls memory usage by limiting the number of concurrent processes, helping to reduce memory thrashing (frequent page swaps). + +2. **Enhanced System Performance:** + - By focusing on short-term scheduling among active processes, it achieves higher CPU utilization and responsiveness, especially beneficial for interactive applications. + +3. **Reduced Overhead in Process Management:** + - By admitting only a manageable number of processes to memory, the system reduces overhead in handling context switching and managing process queues. + +4. **Flexibility with Different Process Types:** + - Works well in systems with mixed workloads by allowing batch jobs to run efficiently in the background and giving priority to interactive tasks. + +### Cons of Two-Level Scheduling + +1. **Increased Complexity:** + - Requires careful design and tuning to manage the two levels of scheduling effectively, adding to the complexity of the operating system. + +2. **Potential Latency for Batch Processes:** + - Long-term scheduling may delay certain processes (especially lower-priority or batch jobs), affecting the turnaround time for such tasks. + +3. **Memory Management Challenges:** + - Determining the right mix of processes admitted to memory requires sophisticated memory management, and poor decisions may lead to inefficient memory usage. + +4. **Higher Initial Overhead:** + - The process of admitting and scheduling processes at two levels can introduce some initial overhead, potentially slowing down system responsiveness under heavy load conditions. + +Two-level scheduling is particularly beneficial for multi-user systems and real-time applications where maintaining responsiveness for interactive users is critical. However, the additional complexity requires careful management and may not be suitable for simpler or single-user systems. 
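+
+A minimal sketch of the long-term (admission) decision described above: a new process is admitted to memory only while the number of resident processes stays below a multiprogramming limit, and the short-term scheduler then picks among the admitted ones. The limit and names are illustrative; the accompanying program.c models the two levels as arrival-time ordering followed by FCFS instead:
+
+```c
+#define MAX_IN_MEMORY 4   // Maximum degree of multiprogramming (illustrative)
+
+// Long-term scheduler: decide whether another process may be admitted
+// to memory, where the short-term (CPU) scheduler will then select from it.
+int can_admit(int processes_in_memory) {
+    return processes_in_memory < MAX_IN_MEMORY;
+}
+```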
diff --git a/Queue/CircularQueueUsingTwoStacks/CircularQueueUsingTwoStacks.c b/Queue/CircularQueueUsingTwoStacks/CircularQueueUsingTwoStacks.c new file mode 100644 index 00000000..a083aebf --- /dev/null +++ b/Queue/CircularQueueUsingTwoStacks/CircularQueueUsingTwoStacks.c @@ -0,0 +1,147 @@ +#include +#include + +#define MAX 100 + +typedef struct +{ + int arr[MAX]; + int top; +} Stack; + +// Function to initialize the stack +void initStack(Stack *stack) +{ + stack->top = -1; +} + +// Function to check if the stack is empty +int isEmpty(Stack *stack) +{ + return stack->top == -1; +} + +// Function to check if the stack is full +int isFull(Stack *stack) +{ + return stack->top == MAX - 1; +} + +// Function to push an element onto the stack +void push(Stack *stack, int value) +{ + if (isFull(stack)) + { + printf("Stack overflow, cannot push %d\n", value); + return; + } + stack->arr[++stack->top] = value; +} + +// Function to pop an element from the stack +int pop(Stack *stack) +{ + if (isEmpty(stack)) + { + printf("Stack underflow, cannot pop\n"); + return -1; + } + return stack->arr[stack->top--]; +} + +// Function to implement a circular queue using two stacks +typedef struct +{ + Stack stack1; + Stack stack2; +} CircularQueue; + +// Function to initialize the circular queue +void initQueue(CircularQueue *queue) +{ + initStack(&queue->stack1); + initStack(&queue->stack2); +} + +// Function to enqueue an element into the queue +void enqueue(CircularQueue *queue, int value) +{ + push(&queue->stack1, value); +} + +// Function to dequeue an element from the queue +int dequeue(CircularQueue *queue) +{ + if (isEmpty(&queue->stack2)) + { + // Transfer all elements from stack1 to stack2 + while (!isEmpty(&queue->stack1)) + { + int temp = pop(&queue->stack1); + push(&queue->stack2, temp); + } + } + + if (isEmpty(&queue->stack2)) + { + printf("Queue is empty, cannot dequeue\n"); + return -1; + } + + return pop(&queue->stack2); +} + +// Function to display the elements of the queue +void displayQueue(CircularQueue *queue) +{ + if (isEmpty(&queue->stack1) && isEmpty(&queue->stack2)) + { + printf("Queue is empty\n"); + return; + } + + // Print elements from stack2 (front of the queue) + for (int i = queue->stack2.top; i >= 0; i--) + { + printf("%d ", queue->stack2.arr[i]); + } + + // Print elements from stack1 (end of the queue) + for (int i = 0; i <= queue->stack1.top; i++) + { + printf("%d ", queue->stack1.arr[i]); + } + printf("\n"); +} + +int main() +{ + CircularQueue queue; + initQueue(&queue); + + // Enqueue elements + enqueue(&queue, 10); + enqueue(&queue, 20); + enqueue(&queue, 30); + + // Display queue + printf("Queue after enqueuing 10, 20, 30: "); + displayQueue(&queue); + + // Dequeue an element + printf("Dequeued element: %d\n", dequeue(&queue)); + + // Display queue after dequeue + printf("Queue after dequeue: "); + displayQueue(&queue); + + // Enqueue more elements + enqueue(&queue, 40); + enqueue(&queue, 50); + + // Display final state of the queue + printf("Queue after enqueuing 40, 50: "); + displayQueue(&queue); + + return 0; +} diff --git a/Queue/CircularQueueUsingTwoStacks/README.md b/Queue/CircularQueueUsingTwoStacks/README.md new file mode 100644 index 00000000..1bbfe9c1 --- /dev/null +++ b/Queue/CircularQueueUsingTwoStacks/README.md @@ -0,0 +1,31 @@ +# Explanation + +Two Stacks (stack1 and stack2): stack1 is used for enqueue operations, while stack2 is used for dequeue operations. +Enqueue Operation: Pushes the element onto stack1. 
+Dequeue Operation: If stack2 is empty, all elements from stack1 are transferred to stack2, reversing their order to maintain the FIFO property. The top element of stack2 is then popped to simulate dequeue.
+Display Operation: Prints elements in stack2 from top to bottom (front of the queue) followed by stack1 from bottom to top (end of the queue).
+
+# Complexity
+
+Time Complexity:
+- Enqueue: O(1)
+- Dequeue: O(n) in the worst case (when transferring elements between stacks)
+
+Space Complexity:
+- O(n), where n is the total number of elements in the queue.
+
+This approach ensures that the queue behaves like a circular queue when combined with proper logic for handling full and empty cases.
diff --git a/Searching Algorithms/Cascade Search /Readme.md b/Searching Algorithms/Cascade Search /Readme.md
new file mode 100644
index 00000000..720b622e
--- /dev/null
+++ b/Searching Algorithms/Cascade Search /Readme.md
@@ -0,0 +1,61 @@
+
+## Overview
+The **Cascade Search** algorithm is a sequential filtering process where data points are passed through multiple stages, each with progressively stricter criteria (thresholds). If a data point fails to meet the threshold at any stage, it is discarded, reducing the number of data points that need to be processed at subsequent stages. This approach can significantly improve efficiency, especially in applications such as object detection, where irrelevant data can be filtered early.
+
+In this C code implementation, we generate a list of random values and filter them through three stages, each with a specified threshold.
+
+## How to Use
+
+### Prerequisites
+This code requires a C compiler, such as GCC.
+
+### Compilation and Execution
+1. **Compile** the code:
+   ```bash
+   gcc cascade_search.c -o cascade_search
+   ```
+2. **Run** the compiled program:
+   ```bash
+   ./cascade_search
+   ```
+
+### Code Structure
+- **`generate_data`**: Generates an array of random integers between 0 and 99.
+- **`print_data`**: Prints the generated array.
+- **`cascade_stage`**: Checks if a data point meets a specified threshold.
+- **`cascade_search`**: Applies the three-stage filtering process on the generated data array, printing values that pass all thresholds.
+
+### Example Output
+The program first displays the randomly generated data, then iteratively prints values that pass each of the three stages.
+
+```
+Generated Data:
+32 73 24 85 ... 97 43 22
+
+Running Cascade Search...
+Stage 1 (Threshold: 50):
+Data[1] = 73 passed Stage 1
+Data[3] = 85 passed Stage 1
+...
+Data[3] = 85 passed Stage 2
+...
+Data[9] = 97 passed Stage 3
+```
+
+## Time Complexity
+The time complexity of this cascade search algorithm depends on the input size \( N \) and the percentage of data points that pass each stage:
+- **Best Case**: \( O(N) \), if all data points fail the first stage (minimal filtering required).
+- **Average Case**: \( O(N) \), assuming a constant fraction of data points pass each stage.
+- **Worst Case**: \( O(N) \), if all elements pass all stages, requiring every data point to go through each threshold.
+
+Since each stage in this code operates independently with constant-time comparisons, time complexity per stage remains linear relative to the input size.
+
+## Space Complexity
+The space complexity of this algorithm is:
+- **\( O(N) \)** for the array to store generated data.
+- **\( O(1) \)** additional space, since no extra data structures are used.
+
+Overall, the space complexity is **\( O(N) \)**.
+ + + diff --git a/Searching Algorithms/Cascade Search /program.c b/Searching Algorithms/Cascade Search /program.c new file mode 100644 index 00000000..bcca8154 --- /dev/null +++ b/Searching Algorithms/Cascade Search /program.c @@ -0,0 +1,60 @@ +#include +#include +#include + +#define NUM_ELEMENTS 100 +#define THRESHOLD_STAGE1 50 +#define THRESHOLD_STAGE2 75 +#define THRESHOLD_STAGE3 90 + +void generate_data(int *data, int size) { + srand(time(0)); + for (int i = 0; i < size; i++) { + data[i] = rand() % 100; // Random values between 0 and 99 + } +} + +void print_data(const int *data, int size) { + for (int i = 0; i < size; i++) { + printf("%d ", data[i]); + } + printf("\n"); +} + +int cascade_stage(int value, int threshold) { + return value >= threshold; +} + +void cascade_search(int *data, int size) { + printf("Running Cascade Search...\n"); + + // Stage 1: Filter based on THRESHOLD_STAGE1 + printf("Stage 1 (Threshold: %d):\n", THRESHOLD_STAGE1); + for (int i = 0; i < size; i++) { + if (cascade_stage(data[i], THRESHOLD_STAGE1)) { + printf("Data[%d] = %d passed Stage 1\n", i, data[i]); + + // Stage 2: Filter based on THRESHOLD_STAGE2 + if (cascade_stage(data[i], THRESHOLD_STAGE2)) { + printf("Data[%d] = %d passed Stage 2\n", i, data[i]); + + // Stage 3: Filter based on THRESHOLD_STAGE3 + if (cascade_stage(data[i], THRESHOLD_STAGE3)) { + printf("Data[%d] = %d passed Stage 3\n", i, data[i]); + } + } + } + } +} + +int main() { + int data[NUM_ELEMENTS]; + generate_data(data, NUM_ELEMENTS); + + printf("Generated Data:\n"); + print_data(data, NUM_ELEMENTS); + + cascade_search(data, NUM_ELEMENTS); + + return 0; +} diff --git a/Searching Algorithms/Kth_Largest_Element_Of_The_Array/Valid_Parentheses/Readme.md b/Searching Algorithms/Kth_Largest_Element_Of_The_Array/Valid_Parentheses/Readme.md new file mode 100644 index 00000000..9c7c52dc --- /dev/null +++ b/Searching Algorithms/Kth_Largest_Element_Of_The_Array/Valid_Parentheses/Readme.md @@ -0,0 +1,40 @@ +## Valid Parentheses + +# Problem Description +The problem of "Valid Parentheses" is a common algorithmic challenge where you need to determine if a given string containing just the characters '(', ')', '{', '}', '[' and ']' is valid. A string is considered valid if: + +Open brackets must be closed by the corresponding closing brackets. +Open brackets must be closed in the correct order. + +# Example +Input: s = "()" +Output: true + +Input: s = "()[]{}" +Output: true + +Input: s = "(] " +Output: false + +Input: s = "([)]" +Output: false + +Input: s = "{[]}" +Output: true + +# Solution Approach +A common approach to solving the valid parentheses problem is to use a stack data structure. The stack helps to keep track of the opening brackets and ensures that they are closed in the correct order. + +Steps: +Initialize an empty stack. +Iterate through each character in the string: +If the character is an opening bracket ((, {, [), push it onto the stack. +If the character is a closing bracket (), }, ]): +Check if the stack is empty. If it is, return false (there's no corresponding opening bracket). +Pop the top element from the stack and check if it matches the current closing bracket. If it does not match, return false. +After processing all characters, check if the stack is empty. If it is empty, return true (all opening brackets were matched); otherwise, return false. + +# Time and space complexity +Time Complexity: O(n), where n is the length of the string. 
We make a single pass through the string, performing constant time operations for each character.
+
+Space Complexity: O(n) in the worst case, where all characters are opening brackets, which would require storing them in the stack. In the best case, such as a string of immediately closed pairs like "()()()", the stack stays small and the extra space is effectively O(1).
\ No newline at end of file
diff --git a/Searching Algorithms/Kth_Largest_Element_Of_The_Array/Valid_Parentheses/Valid Parentheses.c b/Searching Algorithms/Kth_Largest_Element_Of_The_Array/Valid_Parentheses/Valid Parentheses.c
new file mode 100644
index 00000000..7587f5a0
--- /dev/null
+++ b/Searching Algorithms/Kth_Largest_Element_Of_The_Array/Valid_Parentheses/Valid Parentheses.c
@@ -0,0 +1,41 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+bool isValid(char *s) {
+    int stackSize = 0;
+    int stackCapacity = 100; // Initial capacity for the stack
+    char *stack = (char *)malloc(stackCapacity * sizeof(char));
+
+    for (int i = 0; s[i] != '\0'; i++) {
+        char c = s[i];
+        if (c == '(' || c == '[' || c == '{') {
+            // Push the opening bracket to the stack
+            if (stackSize == stackCapacity) {
+                stackCapacity *= 2; // Double the stack size if needed
+                stack = (char *)realloc(stack, stackCapacity * sizeof(char));
+            }
+            stack[stackSize++] = c;
+        } else if (c == ')' || c == ']' || c == '}') {
+            // A closing bracket must match the most recent opening bracket
+            if (stackSize == 0) {
+                free(stack);
+                return false; // No matching opening bracket
+            }
+            char open = stack[--stackSize]; // Pop the last opening bracket
+            if ((c == ')' && open != '(') ||
+                (c == ']' && open != '[') ||
+                (c == '}' && open != '{')) {
+                free(stack);
+                return false; // Mismatched bracket types
+            }
+        }
+    }
+
+    bool result = (stackSize == 0); // Valid only if every bracket was matched
+    free(stack);
+    return result;
+}
+
+int main() {
+    char *s = "{[()]}";
+    if (isValid(s)) {
+        printf("Valid parentheses\n");
+    } else {
+        printf("Invalid parentheses\n");
+    }
+    return 0;
+}
\ No newline at end of file
diff --git a/Sorting Algorithms/Merge_k_Sorted_Lists/Merge k sorted lists.c b/Sorting Algorithms/Merge_k_Sorted_Lists/Merge k sorted lists.c
new file mode 100644
index 00000000..4d7ea611
--- /dev/null
+++ b/Sorting Algorithms/Merge_k_Sorted_Lists/Merge k sorted lists.c
@@ -0,0 +1,87 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+// Definition for singly-linked list.
+struct ListNode { + int val; + struct ListNode *next; +}; + +// Min-Heap Node +struct MinHeapNode { + struct ListNode *listNode; +}; + +// MinHeap +struct MinHeap { + int size; + struct MinHeapNode **array; +}; + +// Function to create a new MinHeap Node +struct MinHeapNode* newMinHeapNode(struct ListNode *listNode) { + struct MinHeapNode* minHeapNode = (struct MinHeapNode*)malloc(sizeof(struct MinHeapNode)); + minHeapNode->listNode = listNode; + return minHeapNode; +} + +// Function to create a MinHeap +struct MinHeap* createMinHeap(int capacity) { + struct MinHeap* minHeap = (struct MinHeap*)malloc(sizeof(struct MinHeap)); + minHeap->size = 0; + minHeap->array= (struct MinHeapNode*)malloc(capacity * sizeof(struct MinHeapNode)); + return minHeap; +} + +// Function to swap two MinHeap Nodes +void swapMinHeapNode(struct MinHeapNode** a, struct MinHeapNode** b) { + struct MinHeapNode* t = *a; + *a = *b; + *b = t; +} + +// Function to min-heapify at a given index +void minHeapify(struct MinHeap* minHeap, int idx) { + int smallest = idx; + int left = 2 * idx + 1; + int right = 2 * idx + 2; + + if (left < minHeap->size && minHeap->array[left]->listNode->val < minHeap->array[smallest]->listNode->val) + smallest = left; + + if (right < minHeap->size && minHeap->array[right]->listNode->val < minHeap->array[smallest]->listNode->val) + smallest = right; + + if (smallest != idx) { + swapMinHeapNode(&minHeap->array[smallest], &minHeap->array[idx]); + minHeapify(minHeap, smallest); + } +} + +// Function to extract the minimum node from the heap +struct ListNode* extractMin(struct MinHeap* minHeap) { + if (minHeap->size == 0) + return NULL; + + struct ListNode* root = minHeap->array[0]->listNode; + + if (minHeap->size > 1) { + minHeap->array[0] = minHeap->array[minHeap->size - 1]; + minHeapify(minHeap, 0); + } + minHeap->size--; + return root; +} + +// Function to insert a new node into the heap +void insertMinHeap(struct MinHeap* minHeap, struct MinHeapNode* minHeapNode) { + minHeap->size++; + int i = minHeap->size - 1; + while (i && minHeapNode->listNode->val < minHeap->array[(i - 1) / 2]->listNode->val) { + minHeap->array[i] = minHeap->array[(i - 1) / 2]; + i = (i - 1) / 2; + } + minHeap->array[i] = minHeapNode; +} + +// Function to merge k sorted linked \ No newline at end of file diff --git a/Sorting Algorithms/Merge_k_Sorted_Lists/Readme.md b/Sorting Algorithms/Merge_k_Sorted_Lists/Readme.md new file mode 100644 index 00000000..d23c5cbf --- /dev/null +++ b/Sorting Algorithms/Merge_k_Sorted_Lists/Readme.md @@ -0,0 +1,25 @@ +## Merge k Sorted Lists + +# Problem Description +The problem of merging k sorted linked lists involves taking k linked lists, each sorted in ascending order, and merging them into a single sorted linked list. The challenge is to do this efficiently, both in terms of time and space. + +# Example +Given the following k sorted linked lists: + +List 1: 1 -> 4 -> 5 +List 2: 1 -> 3 -> 4 +List 3: 2 -> 6 +The merged sorted linked list should be: + +1 -> 1 -> 2 -> 3 -> 4 -> 4 -> 5 -> 6 + +# Solution Approach +Min-Heap (Priority Queue): + +Use a min-heap (or priority queue) to efficiently retrieve the smallest element among the heads of the k lists. +Push the head of each linked list into the min-heap. +Repeatedly extract the minimum element from the heap, adding it to the merged list, and push the next element from the same linked list into the heap. +Continue this process until all elements from all lists have been processed. 
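+
+The heap-based approach above can be completed with a merge routine along the following lines. This is only a sketch, since the companion `Merge k sorted lists.c` file is truncated before its merge function; it reuses the helpers defined in that file (`createMinHeap`, `newMinHeapNode`, `insertMinHeap`, `extractMin`) and assumes they behave as intended:
+
+```c
+// Merge k sorted lists using the min-heap helpers (illustrative sketch).
+struct ListNode* mergeKLists(struct ListNode** lists, int k) {
+    struct MinHeap* minHeap = createMinHeap(k);
+
+    // Seed the heap with the head of every non-empty list
+    for (int i = 0; i < k; i++) {
+        if (lists[i] != NULL)
+            insertMinHeap(minHeap, newMinHeapNode(lists[i]));
+    }
+
+    struct ListNode dummy = {0, NULL};   // Dummy head simplifies appending
+    struct ListNode* tail = &dummy;
+
+    // Repeatedly take the smallest head and push its successor
+    while (minHeap->size > 0) {
+        struct ListNode* smallest = extractMin(minHeap);
+        tail->next = smallest;
+        tail = smallest;
+        if (smallest->next != NULL)
+            insertMinHeap(minHeap, newMinHeapNode(smallest->next));
+    }
+
+    free(minHeap->array);
+    free(minHeap);
+    return dummy.next;
+}
+```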
+Iterative Merging: + +Alternatively, you could merge the lists iteratively, but this is less efficient than using a min-heap. \ No newline at end of file diff --git a/StackQueues/TwoStackstoImplementQueue/READme.md b/StackQueues/TwoStackstoImplementQueue/READme.md new file mode 100644 index 00000000..d35244b5 --- /dev/null +++ b/StackQueues/TwoStackstoImplementQueue/READme.md @@ -0,0 +1,33 @@ + # Two Stacks to Implement a Queue + +This project demonstrates how to implement a queue using two stacks in the C programming language. A queue follows the First-In-First-Out (FIFO) principle, while a stack follows the Last-In-First-Out (LIFO) principle. By using two stacks, we can simulate the behavior of a queue. + +## Problem Explanation + +The goal is to create a queue using two stacks (`stack1` and `stack2`). The queue will support two main operations: +1. **Enqueue (push)** - Adds an element to the end of the queue. +2. **Dequeue (pop)** - Removes an element from the front of the queue. + +### Solution Approach + +1. **Enqueue Operation**: + - Push the new element onto `stack1`. + +2. **Dequeue Operation**: + - If `stack2` is empty, transfer all elements from `stack1` to `stack2`. This reverses the order, so the oldest element is at the top of `stack2`. + - Pop the top element from `stack2`, which is the front of the queue. + +## Code Implementation + +The program is written in C, with a `QueueUsingStacks` structure that contains two stacks (`stack1` and `stack2`). + +### Files +- `queue_using_stacks.c`: Contains the main code for the queue implementation. + +## Usage + +1. Clone this repository or download the `queue_using_stacks.c` file. +2. Compile the code: + ```bash + gcc queue_using_stacks.c -o queue_using_stacks + diff --git a/StackQueues/TwoStackstoImplementQueue/StackQueue.c b/StackQueues/TwoStackstoImplementQueue/StackQueue.c new file mode 100644 index 00000000..47e8889b --- /dev/null +++ b/StackQueues/TwoStackstoImplementQueue/StackQueue.c @@ -0,0 +1,98 @@ +#include +#include + +#define MAX 100 + +// Stack structure with top pointer and array for elements +typedef struct Stack { + int arr[MAX]; + int top; +} Stack; + +// Function to initialize a stack +void initStack(Stack *stack) { + stack->top = -1; +} + +// Function to check if a stack is empty +int isEmpty(Stack *stack) { + return stack->top == -1; +} + +// Function to check if a stack is full +int isFull(Stack *stack) { + return stack->top == MAX - 1; +} + +// Function to push an element onto a stack +void push(Stack *stack, int data) { + if (isFull(stack)) { + printf("Stack overflow\n"); + return; + } + stack->arr[++stack->top] = data; +} + +// Function to pop an element from a stack +int pop(Stack *stack) { + if (isEmpty(stack)) { + printf("Stack underflow\n"); + return -1; + } + return stack->arr[stack->top--]; +} + +// Queue structure using two stacks +typedef struct QueueUsingStacks { + Stack stack1; + Stack stack2; +} QueueUsingStacks; + +// Function to initialize the queue +void initQueue(QueueUsingStacks *queue) { + initStack(&queue->stack1); + initStack(&queue->stack2); +} + +// Enqueue operation +void enqueue(QueueUsingStacks *queue, int data) { + push(&queue->stack1, data); + printf("Enqueued %d\n", data); +} + +// Dequeue operation +int dequeue(QueueUsingStacks *queue) { + // If both stacks are empty, queue is empty + if (isEmpty(&queue->stack1) && isEmpty(&queue->stack2)) { + printf("Queue is empty\n"); + return -1; + } + + // Move elements from stack1 to stack2 if stack2 is empty + if (isEmpty(&queue->stack2)) { 
+        while (!isEmpty(&queue->stack1)) {
+            push(&queue->stack2, pop(&queue->stack1));
+        }
+    }
+
+    // Pop from stack2, which is the front of the queue
+    int dequeuedValue = pop(&queue->stack2);
+    printf("Dequeued %d\n", dequeuedValue);
+    return dequeuedValue;
+}
+
+int main() {
+    QueueUsingStacks queue;
+    initQueue(&queue);
+
+    enqueue(&queue, 1);
+    enqueue(&queue, 2);
+    enqueue(&queue, 3);
+
+    dequeue(&queue);
+    dequeue(&queue);
+    dequeue(&queue);
+    dequeue(&queue); // Queue is empty at this point
+
+    return 0;
+}
diff --git a/String Algorithms/Longest_Common_Prefix/README.md b/String Algorithms/Longest_Common_Prefix/README.md
new file mode 100644
index 00000000..e965b758
--- /dev/null
+++ b/String Algorithms/Longest_Common_Prefix/README.md
@@ -0,0 +1,91 @@
+# Longest Common Prefix
+
+## Description
+
+This program finds the Longest Common Prefix (LCP) among an array of strings. The LCP is the longest sequence of characters shared at the beginning of all strings in the array. The program identifies the common prefix by comparing the strings character by character.
+
+```
+Example:
+Enter the number of strings: 5
+Enter the strings:
+String 1: prawn
+String 2: prajakta
+String 3: prince
+String 4: probably
+String 5: project
+
+Output:
+The longest common prefix is: pr
+```
+
+### Problem Definition
+
+1. **Given**:
+   - An array of strings and the number of strings, `n`.
+
+2. **Objective**:
+   - Find the longest sequence of characters common to the beginning of all strings in the array.
+
+### Algorithm Overview
+
+1. **Input Validation**:
+   - Ensure the array contains at least one string.
+
+2. **Prefix Comparison**:
+   - Start with the first string as the initial prefix.
+   - Compare the prefix with each subsequent string character by character.
+   - Update the prefix to the common portion after each comparison.
+
+3. **Early Termination**:
+   - If the prefix becomes empty at any point, terminate early, as there is no common prefix.
+
+4. **Output Result**:
+   - Display the longest common prefix if found; otherwise, indicate that there is no common prefix.
+
+### Key Features
+
+- Efficiently determines the common prefix without unnecessary comparisons.
+- Handles edge cases such as identical strings, empty strings, and no common prefix.
+- User-friendly input and output.
+
+### Time Complexity
+
+- **Worst case**: `O(n * m)`, where `n` is the number of strings and `m` is the length of the shortest string, since each character of the shared prefix is compared at most once per string.
+- **Best case**: Lower than the worst case when the prefix becomes empty early, because the comparison stops as soon as no common prefix remains.
+
+### Space Complexity
+
+- `O(m)` for storing the prefix, where `m` is the length of the shortest string.
+
+## Implementation
+
+The implementation in C includes:
+
+1. **Input Handling**:
+   - Accepts the number of strings and their contents.
+
+2. **Logic**:
+   - A function to iteratively compute the longest common prefix.
+   - Compares the prefix with each string and updates it to the common characters.
+
+## Edge Cases for Testing
+
+1. **No Common Prefix**:
+   - Input: `["dog", "racecar", "car"]`
+   - Output: `There is no common prefix among the strings.`
+2. **All Strings Identical**:
+   - Input: `["apple", "apple", "apple"]`
+   - Output: `The longest common prefix is: apple`
+3. **Single String**:
+   - Input: `["alone"]`
+   - Output: `The longest common prefix is: alone`
+4. **Empty Strings**:
+   - Input: `["", "abc", "abcd"]`
+   - Output: `There is no common prefix among the strings.`
+
+## Usage
+
+- Compile the program using a C compiler (e.g., `gcc longest_common_prefix.c -o lcp`).
+- Run the program (`./lcp`).
+- Input the number of strings and their values as prompted.
+- Observe the output indicating the longest common prefix, or a message stating that there is no common prefix.
\ No newline at end of file
diff --git a/String Algorithms/Longest_Common_Prefix/longest_common_prefix.c b/String Algorithms/Longest_Common_Prefix/longest_common_prefix.c
new file mode 100644
index 00000000..7f8b1ccf
--- /dev/null
+++ b/String Algorithms/Longest_Common_Prefix/longest_common_prefix.c
@@ -0,0 +1,62 @@
+#include <stdio.h>
+#include <string.h>
+
+// Maximum number of strings and maximum string length
+#define MAX_STRINGS 100
+#define MAX_LENGTH 100
+
+// Function to find the longest common prefix among an array of strings
+char* longestCommonPrefix(char arr[][MAX_LENGTH], int n) {
+    static char prefix[MAX_LENGTH];
+    strcpy(prefix, arr[0]); // Assume the first string is the prefix
+
+    for (int i = 1; i < n; i++) {
+        int j = 0;
+
+        // Compare the current prefix with the next string character by character
+        while (j < strlen(prefix) && j < strlen(arr[i]) && prefix[j] == arr[i][j]) {
+            j++;
+        }
+
+        // Update the prefix to the common portion
+        prefix[j] = '\0';
+
+        // If the prefix becomes empty, return immediately
+        if (prefix[0] == '\0') {
+            return prefix;
+        }
+    }
+
+    return prefix;
+}
+
+int main() {
+    int n;
+
+    printf("Enter the number of strings: ");
+    scanf("%d", &n);
+
+    if (n <= 0 || n > MAX_STRINGS) {
+        printf("Invalid number of strings! Please enter a value between 1 and %d.\n", MAX_STRINGS);
+        return 1;
+    }
+
+    // 2D array: each row holds one string (up to MAX_LENGTH characters) for easy comparison
+    char arr[MAX_STRINGS][MAX_LENGTH];
+
+    printf("Enter the strings:\n");
+    for (int i = 0; i < n; i++) {
+        printf("String %d: ", i + 1);
+        scanf("%s", arr[i]);
+    }
+
+    char* prefix = longestCommonPrefix(arr, n);
+
+    if (strlen(prefix) > 0) {
+        printf("The longest common prefix is: %s\n", prefix);
+    } else {
+        printf("There is no common prefix among the strings.\n");
+    }
+
+    return 0;
+}
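As a quick reference, below is a minimal, self-contained test sketch (a hypothetical `test_lcp.c`, not part of the diff above) that exercises the same shrink-the-prefix logic as `longestCommonPrefix()` on the edge cases listed in the README. The helper name `commonPrefix` and the hard-coded test arrays are illustrative assumptions.

```c
#include <stdio.h>
#include <string.h>

#define MAX_LENGTH 100

/* Same idea as longestCommonPrefix() above: start with the first string
 * and shrink the prefix against each subsequent string. */
static void commonPrefix(char arr[][MAX_LENGTH], int n, char *prefix) {
    strcpy(prefix, arr[0]);
    for (int i = 1; i < n; i++) {
        int j = 0;
        while (prefix[j] != '\0' && arr[i][j] != '\0' && prefix[j] == arr[i][j]) {
            j++;
        }
        prefix[j] = '\0';          // keep only the shared portion
        if (prefix[0] == '\0') {
            return;                // no common prefix: stop early
        }
    }
}

int main(void) {
    char noPrefix[][MAX_LENGTH]  = {"dog", "racecar", "car"};
    char identical[][MAX_LENGTH] = {"apple", "apple", "apple"};
    char sample[][MAX_LENGTH]    = {"prawn", "prajakta", "prince", "probably", "project"};
    char prefix[MAX_LENGTH];

    commonPrefix(noPrefix, 3, prefix);
    printf("noPrefix:  \"%s\"\n", prefix);   /* expected: "" */

    commonPrefix(identical, 3, prefix);
    printf("identical: \"%s\"\n", prefix);   /* expected: "apple" */

    commonPrefix(sample, 5, prefix);
    printf("sample:    \"%s\"\n", prefix);   /* expected: "pr" */

    return 0;
}
```

The expected output is an empty prefix for the first case, `apple` for the identical strings, and `pr` for the README's main example.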