CODE

1. Implementation of BFS

from collections import deque

# Graph (adjacency list)
graph = {
    'A': ['B', 'C'],
    'B': ['C', 'D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

def bfs(graph, start_node, key):
    visited = set()
    queue = deque([start_node])

    print("Breadth First Search started")

    while queue:
        # Remove the front node from the queue
        node = queue.popleft()

        if node == key:
            print(node, "Found")
            return

        if node not in visited:
            print(node, end=" -> ")
            visited.add(node)

            # Enqueue all unvisited neighbors
            for neighbor in graph[node]:
                if neighbor not in visited:
                    queue.append(neighbor)

    print("\nkey not found")

# Call the function
bfs(graph, 'A', 'E')


Algorithm

  1. Start from the given start node.

  2. Initialize an empty queue.

  3. Insert the start node into the queue.

  4. Initialize an empty visited set.

  5. Repeat until the queue becomes empty:
    a. Remove the front node from the queue.
    b. If the node is not visited:

    • Visit the node.

    • Add it to the visited set.

    • Insert all unvisited neighbors of this node into the queue.

  6. End the BFS traversal when the queue becomes empty.
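For the sample call bfs(graph, 'A', 'E'), the queue evolves as [A] → [B, C] → [C, C, D, E] → [C, D, E, F] → [D, E, F] → [E, F]; the duplicate C is skipped when popped because it is already in the visited set, and the search stops as soon as E is dequeued. Assuming the code runs as written, the expected output is:

    Breadth First Search started
    A -> B -> C -> D -> E Found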






2. Implementation of DFS

# Graph
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

def DFS(graph, start_node):
    visited = set()
    stack = [start_node]

    print("Depth First Search started")

    while stack:
        node = stack.pop()

        if node not in visited:
            print(node, end=" -> ")
            visited.add(node)

            # Push neighbors in reverse order for correct DFS order
            for n in reversed(graph[node]):
                if n not in visited:
                    stack.append(n)

# Call DFS
DFS(graph, 'A')

Algorithm

  1. Start with the given start node.

  2. Initialize an empty stack.

  3. Push the start node into the stack.

  4. Initialize an empty visited set.

  5. Repeat until the stack becomes empty:
    a. Pop the top node from the stack.
    b. If the node is not visited:

    • Visit the node.

    • Add it to the visited set.

    • Push all unvisited neighbors of this node into the stack
      (usually in reverse order to maintain correct DFS order).

  6. End DFS when the stack becomes empty.
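The same traversal can also be written recursively, letting the call stack play the role of the explicit stack. This is a minimal sketch (the name dfs_recursive is ours, for illustration), assuming the same graph dictionary as above:

def dfs_recursive(graph, node, visited=None):
    if visited is None:
        visited = set()
    if node not in visited:
        # Visit the node, then recurse into its unvisited neighbors
        print(node, end=" -> ")
        visited.add(node)
        for n in graph[node]:
            dfs_recursive(graph, n, visited)
    return visited

# For the sample graph this prints the same order as the iterative version:
# A -> B -> D -> E -> F -> C ->
dfs_recursive(graph, 'A')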



3. Implementation of A*


import heapq

# Graph with edge costs
graph = {
    'A': [('B', 2), ('D', 5), ('E', 3)],
    'B': [('D', 3), ('E', 1), ('C', 7)],
    'C': [('E', 3), ('D', 2)],
    'D': [('C', 6), ('E', 7)],
    'E': []
}

# Heuristic values
heuristic = {
    'A': 7,
    'B': 6,
    'C': 8,
    'D': 10,
    'E': 0
}

def astar(graph, start, goal, heuristic):
    open_list = []
    heapq.heappush(open_list, (heuristic[start], 0, start, [start]))

    visited = set()

    while open_list:
        f, g, current, path = heapq.heappop(open_list)

        if current == goal:
            return path, g

        if current in visited:
            continue

        visited.add(current)

        for neighbor, cost in graph[current]:
            if neighbor not in visited:
                new_g = g + cost
                f_score = new_g + heuristic[neighbor]

                heapq.heappush(open_list, (f_score, new_g, neighbor, path + [neighbor]))

    return None, float("inf")


# Run A*
path, cost = astar(graph, 'A', 'E', heuristic)

print("Shortest path from A to E:")
print(" -> ".join(path))
print("Total cost:", cost)

Algorithm

  1. Put the start node in the open list.

  2. Initialize:

    • open_list = {start}

    • closed_list = {}

    • g(start) = 0

    • f(start) = g(start) + h(start)

  3. Repeat until open list becomes empty:

    • Pick the node with lowest f(n) from open list.

    • If this node is the goal, stop and return the path.

    • Move node from open list → closed list.

    • For each neighbor:

      • Calculate temporary g_cost.

      • If neighbor not in open/closed list → insert into open list.

      • Else if new g_cost is smaller → update its f, g, parent.

  4. Continue until the goal is found.

  5. Reconstruct the path by following parent links.
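For the sample graph and heuristic above, the first expansion already reaches the goal: from A the successors get f(B) = 2 + 6 = 8, f(D) = 5 + 10 = 15 and f(E) = 3 + 0 = 3, so E (the goal) has the lowest f and is popped next. Assuming the code runs as written, the expected output is:

    Shortest path from A to E:
    A -> E
    Total cost: 3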





4. Tic-Tac-Toe

import math

board = [" " for _ in range(9)]

def print_board():
    print()
    print(board[0], "|", board[1], "|", board[2])
    print("--+---+--")
    print(board[3], "|", board[4], "|", board[5])
    print("--+---+--")
    print(board[6], "|", board[7], "|", board[8])
    print()

def check_win(player):
    win_conditions = [
        [0,1,2], [3,4,5], [6,7,8],   # rows
        [0,3,6], [1,4,7], [2,5,8],   # columns
        [0,4,8], [2,4,6]             # diagonals
    ]

    for c in win_conditions:
        if board[c[0]] == board[c[1]] == board[c[2]] == player:
            return True
    return False

def check_draw():
    return " " not in board

# Minimax Algorithm
def minimax(is_maximizing):
    if check_win("O"):
        return 1
    if check_win("X"):
        return -1
    if check_draw():
        return 0

    if is_maximizing:
        best_score = -math.inf
        for i in range(9):
            if board[i] == " ":
                board[i] = "O"
                score = minimax(False)
                board[i] = " "
                best_score = max(best_score, score)
        return best_score

    else:
        best_score = math.inf
        for i in range(9):
            if board[i] == " ":
                board[i] = "X"
                score = minimax(True)
                board[i] = " "
                best_score = min(best_score, score)
        return best_score

def computer_move():
    best_score = -math.inf
    move = None

    for i in range(9):
        if board[i] == " ":
            board[i] = "O"
            score = minimax(False)
            board[i] = " "
            if score > best_score:
                best_score = score
                move = i

    board[move] = "O"

def player_move():
    while True:
        # Read the player's move; reject non-numeric input instead of crashing
        try:
            pos = int(input("Enter position (1-9): ")) - 1
        except ValueError:
            print("Invalid move. Try again.")
            continue
        if 0 <= pos < 9 and board[pos] == " ":
            board[pos] = "X"
            break
        else:
            print("Invalid move. Try again.")

def game():
    print("Tic-Tac-Toe (You = X, Computer = O)")
    print_board()

    while True:
        player_move()
        print_board()

        if check_win("X"):
            print("You win!")
            break
        if check_draw():
            print("It's a draw!")
            break

        print("Computer is thinking...")
        computer_move()
        print_board()

        if check_win("O"):
            print("Computer wins!")
            break
        if check_draw():
            print("It's a draw!")
            break

# Run game
game()

 Algorithm 
  1. Start the game and initialize a 3×3 empty board.

  2. Set Player X to play first.

  3. Display the board to the players.

  4. Take input from the current player (X or O) for the cell they want to mark.

  5. Check if the cell is empty:

    • If empty → place the player's mark.

    • If not empty → ask again (invalid move).

  6. Update and display the board after the move.

  7. Check for a winner by verifying all rows, columns, and diagonals for three same marks.

  8. If a player wins, announce the winner and stop the game.

  9. If the board is full and no one wins, declare a draw.

  10. Switch the player (X ↔ O) and repeat steps 4–10 until the game ends.
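Step 7 is what the check_win function above implements; as a quick stand-alone illustration (a hypothetical board position, not part of the game loop), a completed top row for X is detected like this:

# Hypothetical test position: X has completed the top row
board[:] = ["X", "X", "X",
            "O", "O", " ",
            " ", " ", " "]
print(check_win("X"))   # True  (winning row [0, 1, 2])
print(check_win("O"))   # False
print(check_draw())     # False (empty cells remain)

# Reset the board before starting a real game
board[:] = [" " for _ in range(9)]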




5. Minimax

# Game Tree (Adjacency List)
game_tree = {
    "A": ["B", "C", "D"],
    "B": ["E", "F"],
    "C": ["G", "H"],
    "D": ["I", "J"],
    "E": ["K", "L"],
    "F": ["M", "N"],
    "G": ["O", "P"],
    "H": ["Q", "R"],
    "I": ["S", "T"],
    "J": ["U", "V"]
}

# Leaf node values
leaf_values = {
    "K": 5, "L": 8,
    "M": 3, "N": 1,
    "O": 1, "P": 5,
    "Q": 4, "R": 6,
    "S": 7, "T": 2,
    "U": 9, "V": 4
}

def minimax(node, depth, is_max):
    # If leaf node
    if node in leaf_values:
        return leaf_values[node]

    # MAX player's turn
    if is_max:
        best_value = float('-inf')
        for child in game_tree[node]:
            val = minimax(child, depth + 1, False)
            best_value = max(best_value, val)
        return best_value

    # MIN player's turn
    else:
        best_value = float('inf')
        for child in game_tree[node]:
            val = minimax(child, depth + 1, True)
            best_value = min(best_value, val)
        return best_value

# Run minimax from root node A
best_score = minimax('A', 0, True)

print("The optimal value is:", best_score)

 Algorithm 

  1. Define players:

    • Maximizer (usually AI)

    • Minimizer (usually human)

  2. Check if the current board state is terminal:

    • Win

    • Loss

    • Draw

  3. If terminal state, return a score:

    • +1 for AI win

    • –1 for AI loss

    • 0 for draw

  4. If it is the maximizer’s turn (AI):

    • Initialize bestScore = –∞ (on the minimizer’s turn, initialize bestScore = +∞)

  5. Generate all possible moves (empty cells on board).

  6. For each move, simulate playing that move.

  7. Recursively call Minimax on the resulting board state, now switching to the other player.

  8. Undo the move (backtracking to explore other possibilities).

  9. Update bestScore:

    • Maximizer picks maximum score

    • Minimizer picks minimum score

  10. Return the bestScore to the previous step, ensuring optimal move selection at the top level.




6. Linear Regression

import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt

# Data
x = np.array([500, 700, 800, 1000, 1200, 1500, 1800, 2000, 3000])
y = np.array([15, 20, 22, 28, 35, 45, 50, 55, 80])

# Reshape x for sklearn
x = x.reshape(-1, 1)

# Create and train model
model = LinearRegression()
model.fit(x, y)

# Model parameters
m = model.coef_[0]        # slope
c = model.intercept_       # intercept

print("Equation: Price = (", m, ") * Size + (", c, ")")

# Plot
plt.scatter(x, y, label='Data Points')
plt.plot(x, model.predict(x), label='Best Fit Line')

plt.xlabel("House Size (sq-ft)")
plt.ylabel("House Price (Lakhs)")
plt.title("House Size vs House Price")
plt.legend()
plt.show()

 Algorithm

  1. Start

  2. Collect the dataset containing input values X and output values Y.

  3. Calculate the mean of X (mean_x) and Y (mean_y).

  4. Compute the numerator for the slope:

    \sum (X - mean_x)(Y - mean_y)

  5. Compute the denominator for the slope:

    \sum (X - mean_x)^2

  6. Calculate the slope (m) using:

    m = \frac{\sum (X - mean_x)(Y - mean_y)}{\sum (X - mean_x)^2}

  7. Calculate the intercept (c) using:

    c = mean_y - m \times mean_x

  8. Form the regression equation:

    Y = mX + c
  9. Give input value for which prediction is required.

  10. Compute the predicted output using the regression equation and stop.
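The slope and intercept from steps 3–7 can also be computed directly with NumPy, without sklearn; this is a minimal sketch using the same data as the code above (the 1600 sq-ft query is just an example value):

import numpy as np

# House sizes (sq-ft) and prices (Lakhs), same data as above
x = np.array([500, 700, 800, 1000, 1200, 1500, 1800, 2000, 3000], dtype=float)
y = np.array([15, 20, 22, 28, 35, 45, 50, 55, 80], dtype=float)

mean_x, mean_y = x.mean(), y.mean()

# m = sum((X - mean_x)(Y - mean_y)) / sum((X - mean_x)^2)
m = np.sum((x - mean_x) * (y - mean_y)) / np.sum((x - mean_x) ** 2)

# c = mean_y - m * mean_x
c = mean_y - m * mean_x

print(f"Manual fit: Price = {m:.4f} * Size + {c:.4f}")
print("Predicted price for 1600 sq-ft:", m * 1600 + c)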



7. Logistic Regression

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# ----------------------------
# PART 1: PLOT SIGMOID CURVE
# ----------------------------

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Generate test values
x_test = np.linspace(0, 10, 100).reshape(-1, 1)
x_test_bias = np.c_[np.ones(len(x_test)), x_test]

# Sigmoid with manually chosen weights (bias = -6, slope = 1) for plotting
y_test_manual = sigmoid(x_test_bias.dot(np.array([ -6, 1])).reshape(-1, 1))

# Sort values to make a smooth curve
sorted_indices = x_test.ravel().argsort()
x_sorted = x_test.ravel()[sorted_indices]
y_sorted = y_test_manual.ravel()[sorted_indices]

plt.scatter(x_test, y_test_manual, color="blue", label="Data Points")
plt.plot(x_sorted, y_sorted, color="red", linestyle="solid", label="Sigmoid Curve")

plt.xlabel("Study Hours")
plt.ylabel("Probability of Passing")
plt.title("Logistic Regression (Sigmoid Curve)")
plt.legend()
plt.show()


# ----------------------------------------
# PART 2: LOGISTIC REGRESSION FROM SCRATCH
# ----------------------------------------

# Training Data
x = np.array([0, 2, 4, 5, 6, 8, 9, 10]).reshape(-1, 1)
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Add bias term
x_bias = np.c_[np.ones((len(x), 1)), x]

# Initialize weights
theta = np.random.randn(2, 1)

lr = 0.1
epochs = 1000

for epoch in range(epochs):
    z = x_bias.dot(theta)
    h = sigmoid(z)
    
    # Gradient of the log-loss (the 1/m averaging factor is absorbed into the learning rate)
    gradient = x_bias.T.dot(h - y.reshape(-1, 1))
    theta -= lr * gradient

# Extract coefficients
b = theta[0][0]
m = theta[1][0]

print(f"[Manual] Logistic Equation: p(pass) = sigmoid({m:.4f} * x + {b:.4f})")


Algorithm

  1. Start

  2. Collect training data containing features X and target labels Y (0 or 1).

  3. Initialize weights (θ) and bias value (can be 0 or random).

  4. Compute the linear combination:

    z = \theta^T X + b

  5. Apply the sigmoid function to get the predicted probability:

    h = \sigma(z) = \frac{1}{1 + e^{-z}}

  6. Compute the loss (cost function) using:

    J = -\frac{1}{m} \sum [y \log(h) + (1 - y) \log(1 - h)]

  7. Calculate the gradient:

    \text{gradient} = \frac{1}{m} X^T (h - y)

  8. Update the weights (gradient descent):

    \theta = \theta - \alpha \times \text{gradient}
  9. Repeat steps 4 to 8 for a fixed number of epochs or until convergence.

  10. Use the final model to predict output class:

  • if h ≥ 0.5 → predict 1

  • else predict 0, and Stop.
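Step 10 can be added to the from-scratch code above with a small helper (the name predict_pass is ours, for illustration), reusing the trained theta and the sigmoid function:

def predict_pass(hours, theta):
    # p = sigmoid(theta0 + theta1 * hours); classify with the 0.5 threshold
    p = sigmoid(theta[0][0] + theta[1][0] * hours)
    return p, int(p >= 0.5)

for hours in [1, 5, 9]:
    p, label = predict_pass(hours, theta)
    print(f"{hours} study hours -> p(pass) = {p:.3f}, predicted class = {label}")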


