diff --git a/build/1.json b/build/1.json index ad432627..09ddad8e 100644 --- a/build/1.json +++ b/build/1.json @@ -37,16 +37,20 @@ "expected_output": "[5.5, 10.0]" } ], - "tinygrad_starter_code": "from tinygrad.tensor import Tensor\n\ndef matrix_dot_vector_tg(a, b) -> Tensor:\n \"\"\"\n Compute the product of matrix `a` and vector `b` using tinygrad.\n Inputs can be Python lists, NumPy arrays, or tinygrad Tensors.\n Returns a 1-D Tensor of length m, or Tensor(-1) if dimensions mismatch.\n \"\"\"\n # Dimension mismatch check\n if len(a[0]) != len(b):\n return Tensor(-1)\n # Convert to Tensor\n a_t = Tensor(a)\n b_t = Tensor(b)\n # Your implementation here\n pass", - "tinygrad_solution": "from tinygrad.tensor import Tensor\n\ndef matrix_dot_vector_tg(a, b) -> Tensor:\n \"\"\"\n Compute the product of matrix `a` and vector `b` using tinygrad.\n Inputs can be Python lists, NumPy arrays, or tinygrad Tensors.\n Returns a 1-D Tensor of length m, or Tensor(-1) if dimensions mismatch.\n \"\"\"\n if len(a[0]) != len(b):\n return Tensor(-1)\n a_t = Tensor(a)\n b_t = Tensor(b)\n return a_t.matmul(b_t)", + "tinygrad_starter_code": "from tinygrad.tensor import Tensor\n\ndef matrix_dot_vector_tg(a:Tensor, b:Tensor) -> Tensor:\n \"\"\"\n Compute the product of matrix `a` and vector `b` using tinygrad.\n Will be tinygrad Tensors.\n Returns a 1-D Tensor of length m, or Tensor(-1) if dimensions mismatch.\n \"\"\"\n pass", + "tinygrad_solution": "from tinygrad.tensor import Tensor\n\ndef matrix_dot_vector_tg(a: Tensor, b: Tensor) -> Tensor:\n \"\"\"\n Compute the product of matrix `a` and vector `b` using tinygrad.\n Inputs will be tinygrad Tensors.\n Returns a 1-D Tensor of length m, or Tensor(-1) if dimensions mismatch.\n \"\"\"\n if len(a[0]) != len(b):\n return Tensor(-1)\n return a @ b", "tinygrad_test_cases": [ { - "test": "from tinygrad.tensor import Tensor\nres = matrix_dot_vector_tg(\n [[1,2,3],[2,4,5],[6,8,9]],\n [1,2,3]\n)\nprint(res.numpy().tolist())", + "test": "from tinygrad.tensor import Tensor\nres = matrix_dot_vector_tg(\n Tensor([[1,2,3],[2,4,5],[6,8,9]]),\n Tensor([1,2,3])\n)\nprint(res.numpy().tolist())", "expected_output": "[14.0, 25.0, 49.0]" }, { - "test": "from tinygrad.tensor import Tensor\nres = matrix_dot_vector_tg(\n [[1,2,3],[2,4,5]],\n [1,2]\n)\nprint(res.numpy().tolist())", + "test": "from tinygrad.tensor import Tensor\nres = matrix_dot_vector_tg(\n Tensor([[1,2,3],[2,4,5]]),\n Tensor([1,2])\n)\nprint(res.numpy().tolist())", "expected_output": "-1" + }, + { + "test": "from tinygrad.tensor import Tensor\nres = matrix_dot_vector_tg(\n Tensor([[1, 2], [2, 4]]),\n Tensor([1, 2])\n)\nprint(res.numpy().tolist())", + "expected_output": "[5, 10]" } ], "pytorch_starter_code": "import torch\n\ndef matrix_dot_vector(a, b) -> torch.Tensor:\n \"\"\"\n Compute the product of matrix `a` and vector `b` using PyTorch.\n Inputs can be Python lists, NumPy arrays, or torch Tensors.\n Returns a 1-D tensor of length m, or tensor(-1) if dimensions mismatch.\n \"\"\"\n a_t = torch.as_tensor(a, dtype=torch.float)\n b_t = torch.as_tensor(b, dtype=torch.float)\n # Dimension mismatch check\n if a_t.size(1) != b_t.size(0):\n return torch.tensor(-1)\n # Your implementation here\n pass", diff --git a/build/174.json b/build/174.json new file mode 100644 index 00000000..7e51f036 --- /dev/null +++ b/build/174.json @@ -0,0 +1,51 @@ +{ + "id": "174", + "title": "Train a Simple GAN on 1D Gaussian Data", + "difficulty": "hard", + "category": "Deep Learning", + "video": "", + "likes": "0", + "dislikes": "0", + 
"contributor": [ + { + "profile_link": "https://github.com/moe18", + "name": "moe" + } + ], + "pytorch_difficulty": "medium", + "description": "In this task, you will train a Generative Adversarial Network (GAN) to learn a one-dimensional Gaussian distribution. The GAN consists of a generator that produces samples from latent noise and a discriminator that estimates the probability that a given sample is real. Both networks should have one hidden layer with ReLU activation in the hidden layer. The generator’s output layer is linear, while the discriminator's output layer uses a sigmoid activation.\n\nYou must train the GAN using the standard non-saturating GAN loss for the generator and binary cross-entropy loss for the discriminator. In the NumPy version, parameters should be updated using vanilla gradient descent. In the PyTorch version, parameters should be updated using stochastic gradient descent (SGD) with the specified learning rate. The training loop should alternate between updating the discriminator and the generator each iteration.\n\nYour function must return the trained generator forward function `gen_forward(z)`, which produces generated samples given latent noise.", + "learn_section": "## Understanding GANs for 1D Gaussian Data\nA Generative Adversarial Network (GAN) consists of two neural networks - a **Generator** $G_\\theta$ and a **Discriminator** $D_\\phi$ - trained in a minimax game.\n\n### 1. The Roles\n- **Generator** $G_\\theta(z)$: Takes a latent noise vector $z \\sim \\mathcal{N}(0, I)$ and outputs a sample intended to resemble the real data.\n- **Discriminator** $D_\\phi(x)$: Outputs a probability $p \\in (0, 1)$ that the input $x$ came from the real data distribution rather than the generator.\n\n### 2. The Objective\nThe classical GAN objective is:\n$$\n\\min_{\\theta} \\; \\max_{\\phi} \\; \\mathbb{E}_{x \\sim p_{\\text{data}}} [\\log D_\\phi(x)] + \\mathbb{E}_{z \\sim p(z)} [\\log (1 - D_\\phi(G_\\theta(z)))]\n$$\nHere:\n- $p_{\\text{data}}$ is the real data distribution.\n- $p(z)$ is the prior distribution for the latent noise (often standard normal).\n\n### 3. Practical Losses\nIn implementation, we minimize:\n- **Discriminator loss**:\n$$\n\\mathcal{L}_D = - \\left( \\frac{1}{m} \\sum_{i=1}^m \\log D(x^{(i)}_{\\text{real}}) + \\log(1 - D(x^{(i)}_{\\text{fake}})) \\right)\n$$\n- **Generator loss** (non-saturating form):\n$$\n\\mathcal{L}_G = - \\frac{1}{m} \\sum_{i=1}^m \\log D(G(z^{(i)}))\n$$\n\n### 4. Forward/Backward Flow\n1. **Discriminator step**: Real samples $x_{\\text{real}}$ and fake samples $x_{\\text{fake}} = G(z)$ are passed through $D$, and $\\mathcal{L}_D$ is minimized w.r.t. $\\phi$.\n2. **Generator step**: Fresh $z$ is sampled, $x_{\\text{fake}} = G(z)$ is passed through $D$, and $\\mathcal{L}_G$ is minimized w.r.t. $\\theta$ while keeping $\\phi$ fixed.\n\n### 5. Architecture for This Task\n- **Generator**: Fully connected layer ($\\mathbb{R}^{\\text{latent\\_dim}} \\to \\mathbb{R}^{\\text{hidden\\_dim}}$) -> ReLU -> Fully connected layer ($\\mathbb{R}^{\\text{hidden\\_dim}} \\to \\mathbb{R}^1$).\n- **Discriminator**: Fully connected layer ($\\mathbb{R}^1 \\to \\mathbb{R}^{\\text{hidden\\_dim}}$) → ReLU → Fully connected layer ($\\mathbb{R}^{\\text{hidden\\_dim}} \\to \\mathbb{R}^1$) → Sigmoid.\n\n### 6. 
Numerical Tips\n- Initialize weights with a small Gaussian ($\\mathcal{N}(0, 0.01)$).\n- Add $10^{-8}$ to logs for numerical stability.\n- Use a consistent batch size $m$ for both real and fake samples.\n- Always sample fresh noise for the generator on each update.\n\n**Your Task**: Implement the training loop to learn the parameters $\\theta$ and $\\phi$, and return the trained `gen_forward(z)` function. The evaluation (mean/std of generated samples) will be handled in the test cases.", + "starter_code": "import numpy as np\n\ndef train_gan(mean_real: float, std_real: float, latent_dim: int = 1, hidden_dim: int = 16, learning_rate: float = 0.001, epochs: int = 5000, batch_size: int = 128, seed: int = 42):\n \"\"\"\n Train a simple GAN to learn a 1D Gaussian distribution.\n\n Args:\n mean_real: Mean of the target Gaussian\n std_real: Std of the target Gaussian\n latent_dim: Dimension of the noise input to the generator\n hidden_dim: Hidden layer size for both networks\n learning_rate: Learning rate for gradient descent\n epochs: Number of training epochs\n batch_size: Training batch size\n seed: Random seed for reproducibility\n\n Returns:\n gen_forward: A function that takes z and returns generated samples\n \"\"\"\n # Your code here\n pass", + "solution": "import numpy as np\n\ndef relu(x):\n return np.maximum(0, x)\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef train_gan(mean_real: float, std_real: float, latent_dim: int = 1, hidden_dim: int = 16, learning_rate: float = 0.001, epochs: int = 5000, batch_size: int = 128, seed: int = 42):\n np.random.seed(seed)\n data_dim = 1\n\n # Initialize generator weights\n w1_g = np.random.normal(0, 0.01, (latent_dim, hidden_dim))\n b1_g = np.zeros(hidden_dim)\n w2_g = np.random.normal(0, 0.01, (hidden_dim, data_dim))\n b2_g = np.zeros(data_dim)\n\n # Initialize discriminator weights\n w1_d = np.random.normal(0, 0.01, (data_dim, hidden_dim))\n b1_d = np.zeros(hidden_dim)\n w2_d = np.random.normal(0, 0.01, (hidden_dim, 1))\n b2_d = np.zeros(1)\n\n def disc_forward(x):\n h1 = np.dot(x, w1_d) + b1_d\n a1 = relu(h1)\n logit = np.dot(a1, w2_d) + b2_d\n p = sigmoid(logit)\n return p, logit, a1, h1\n\n def gen_forward(z):\n h1 = np.dot(z, w1_g) + b1_g\n a1 = relu(h1)\n x_gen = np.dot(a1, w2_g) + b2_g\n return x_gen, a1, h1\n\n for epoch in range(epochs):\n # Sample real data\n x_real = np.random.normal(mean_real, std_real, batch_size)[:, None]\n z = np.random.normal(0, 1, (batch_size, latent_dim))\n x_fake, _, _ = gen_forward(z)\n\n # Discriminator forward\n p_real, _, a1_real, h1_real = disc_forward(x_real)\n p_fake, _, a1_fake, h1_fake = disc_forward(x_fake)\n\n # Discriminator gradients\n grad_logit_real = - (1 - p_real) / batch_size\n grad_a1_real = grad_logit_real @ w2_d.T\n grad_h1_real = grad_a1_real * (h1_real > 0)\n grad_w1_d_real = x_real.T @ grad_h1_real\n grad_b1_d_real = np.sum(grad_h1_real, axis=0)\n grad_w2_d_real = a1_real.T @ grad_logit_real\n grad_b2_d_real = np.sum(grad_logit_real, axis=0)\n\n grad_logit_fake = p_fake / batch_size\n grad_a1_fake = grad_logit_fake @ w2_d.T\n grad_h1_fake = grad_a1_fake * (h1_fake > 0)\n grad_w1_d_fake = x_fake.T @ grad_h1_fake\n grad_b1_d_fake = np.sum(grad_h1_fake, axis=0)\n grad_w2_d_fake = a1_fake.T @ grad_logit_fake\n grad_b2_d_fake = np.sum(grad_logit_fake, axis=0)\n\n grad_w1_d = grad_w1_d_real + grad_w1_d_fake\n grad_b1_d = grad_b1_d_real + grad_b1_d_fake\n grad_w2_d = grad_w2_d_real + grad_w2_d_fake\n grad_b2_d = grad_b2_d_real + grad_b2_d_fake\n\n w1_d -= learning_rate * grad_w1_d\n b1_d 
-= learning_rate * grad_b1_d\n w2_d -= learning_rate * grad_w2_d\n b2_d -= learning_rate * grad_b2_d\n\n # Generator update\n z = np.random.normal(0, 1, (batch_size, latent_dim))\n x_fake, a1_g, h1_g = gen_forward(z)\n p_fake, _, a1_d, h1_d = disc_forward(x_fake)\n\n grad_logit_fake = - (1 - p_fake) / batch_size\n grad_a1_d = grad_logit_fake @ w2_d.T\n grad_h1_d = grad_a1_d * (h1_d > 0)\n grad_x_fake = grad_h1_d @ w1_d.T\n\n grad_a1_g = grad_x_fake @ w2_g.T\n grad_h1_g = grad_a1_g * (h1_g > 0)\n grad_w1_g = z.T @ grad_h1_g\n grad_b1_g = np.sum(grad_h1_g, axis=0)\n grad_w2_g = a1_g.T @ grad_x_fake\n grad_b2_g = np.sum(grad_x_fake, axis=0)\n\n w1_g -= learning_rate * grad_w1_g\n b1_g -= learning_rate * grad_b1_g\n w2_g -= learning_rate * grad_w2_g\n b2_g -= learning_rate * grad_b2_g\n\n return gen_forward", + "example": { + "input": "gen_forward = train_gan(4.0, 1.25, epochs=1000, seed=42)\nz = np.random.normal(0, 1, (500, 1))\nx_gen, _, _ = gen_forward(z)\n(round(np.mean(x_gen), 4), round(np.std(x_gen), 4))", + "output": "(0.0004, 0.0002)", + "reasoning": "The test cases call `gen_forward` after training, sample 500 points, and then compute the mean and std." + }, + "test_cases": [ + { + "test": "gen_forward = train_gan(4.0, 1.25, epochs=1000, seed=42)\nz = np.random.normal(0, 1, (500, 1))\nx_gen, _, _ = gen_forward(z)\nprint((round(np.mean(x_gen), 4), round(np.std(x_gen), 4)))", + "expected_output": "(0.0004, 0.0002)" + }, + { + "test": "gen_forward = train_gan(0.0, 1.0, epochs=500, seed=0)\nz = np.random.normal(0, 1, (300, 1))\nx_gen, _, _ = gen_forward(z)\nprint((round(np.mean(x_gen), 4), round(np.std(x_gen), 4)))", + "expected_output": "(-0.0002, 0.0002)" + }, + { + "test": "gen_forward = train_gan(-2.0, 0.5, epochs=1500, seed=123)\nz = np.random.normal(0, 1, (400, 1))\nx_gen, _, _ = gen_forward(z)\nprint((round(np.mean(x_gen), 4), round(np.std(x_gen), 4)))", + "expected_output": "(-0.0044, 0.0002)" + } + ], + "pytorch_starter_code": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\ndef train_gan(mean_real: float, std_real: float, latent_dim: int = 1, hidden_dim: int = 16, learning_rate: float = 0.001, epochs: int = 5000, batch_size: int = 128, seed: int = 42):\n torch.manual_seed(seed)\n # Your PyTorch implementation here\n pass", + "pytorch_solution": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\ndef train_gan(mean_real: float, std_real: float, latent_dim: int = 1, hidden_dim: int = 16, learning_rate: float = 0.001, epochs: int = 5000, batch_size: int = 128, seed: int = 42):\n torch.manual_seed(seed)\n\n class Generator(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(latent_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, 1)\n )\n def forward(self, z):\n return self.net(z)\n\n class Discriminator(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(1, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, 1),\n nn.Sigmoid()\n )\n def forward(self, x):\n return self.net(x)\n\n G = Generator()\n D = Discriminator()\n\n # Use SGD as requested\n opt_G = optim.SGD(G.parameters(), lr=learning_rate)\n opt_D = optim.SGD(D.parameters(), lr=learning_rate)\n criterion = nn.BCELoss()\n\n for _ in range(epochs):\n # Real and fake batches\n real_data = torch.normal(mean_real, std_real, size=(batch_size, 1))\n noise = torch.randn(batch_size, latent_dim)\n fake_data = G(noise)\n\n # ----- Discriminator step -----\n opt_D.zero_grad()\n pred_real = D(real_data)\n pred_fake 
= D(fake_data.detach())\n loss_real = criterion(pred_real, torch.ones_like(pred_real))\n loss_fake = criterion(pred_fake, torch.zeros_like(pred_fake))\n loss_D = loss_real + loss_fake\n loss_D.backward()\n opt_D.step()\n\n # ----- Generator step -----\n opt_G.zero_grad()\n pred_fake = D(fake_data)\n # non-saturating generator loss: maximize log D(G(z)) -> minimize -log D(G(z))\n loss_G = criterion(pred_fake, torch.ones_like(pred_fake))\n loss_G.backward()\n opt_G.step()\n\n return G.forward", + "pytorch_test_cases": [ + { + "test": "gen_forward = train_gan(4.0, 1.25, epochs=100, seed=42)\nz = torch.randn(500, 1)\nx_gen = gen_forward(z)\nprint((round(x_gen.mean().item(), 4), round(x_gen.std().item(), 4)))", + "expected_output": "(0.4725, 0.3563)" + }, + { + "test": "gen_forward = train_gan(0.0, 1.0, epochs=50, seed=0)\nz = torch.randn(300, 1)\nx_gen = gen_forward(z)\nprint((round(x_gen.mean().item(), 4), round(x_gen.std().item(), 4)))", + "expected_output": "(0.0644, 0.244)" + } + ] +} \ No newline at end of file diff --git a/build/184.json b/build/184.json new file mode 100644 index 00000000..6f75a9aa --- /dev/null +++ b/build/184.json @@ -0,0 +1,42 @@ +{ + "id": "184", + "title": "Empirical Probability Mass Function (PMF)", + "difficulty": "easy", + "category": "Probability & Statistics", + "video": "", + "likes": "0", + "dislikes": "0", + "contributor": [ + { + "profile_link": "https://github.com/Jeet009", + "name": "Jeet Mukherjee" + } + ], + "description": "## Problem\n\nGiven a list of integer samples drawn from a discrete distribution, implement a function to compute the empirical Probability Mass Function (PMF). The function should return a list of `(value, probability)` pairs sorted by the value in ascending order. If the input is empty, return an empty list.", + "learn_section": "\n# Learn Section\n\n# Probability Mass Function (PMF) — Simple Explanation\n\nA **probability mass function (PMF)** describes how probabilities are assigned to the possible outcomes of a **discrete random variable**.\n\n- It tells you the chance of each specific outcome. \n- Each probability is non-negative. \n- The total of all probabilities adds up to 1.\n\n## Estimating from data\nIf the true probabilities are unknown, you can estimate them with an **empirical PMF**:\n- Count how often each outcome appears. \n- Divide by the total number of observations. \n\n## Example\nObserved sequence: `1, 2, 2, 3, 3, 3` (6 outcomes total)\n- \"1\" appears once → estimated probability = 1/6 \n- \"2\" appears twice → estimated probability = 2/6 = 1/3 \n- \"3\" appears three times → estimated probability = 3/6 = 1/2 \n\n\n ", + "starter_code": "def empirical_pmf(samples):\n \"\"\"\n Given an iterable of integer samples, return a list of (value, probability)\n pairs sorted by value ascending.\n \"\"\"\n # TODO: Implement the function\n pass", + "solution": "from collections import Counter\n\ndef empirical_pmf(samples):\n \"\"\"\n Given an iterable of integer samples, return a list of (value, probability)\n pairs sorted by value ascending.\n \"\"\"\n samples = list(samples)\n if not samples:\n return []\n total = len(samples)\n cnt = Counter(samples)\n result = [(k, cnt[k] / total) for k in sorted(cnt.keys())]\n return result", + "example": { + "input": "samples = [1, 2, 2, 3, 3, 3]", + "output": "[(1, 0.16666666666666666), (2, 0.3333333333333333), (3, 0.5)]", + "reasoning": "Counts are {1:1, 2:2, 3:3} over 6 samples, so probabilities are 1/6, 2/6, and 3/6 respectively, returned sorted by value." 
+ }, + "test_cases": [ + { + "test": "print(empirical_pmf([1, 2, 2, 3, 3, 3]))", + "expected_output": "[(1, 0.16666666666666666), (2, 0.3333333333333333), (3, 0.5)]" + }, + { + "test": "print(empirical_pmf([5, 5, 5, 5]))", + "expected_output": "[(5, 1.0)]" + }, + { + "test": "print(empirical_pmf([]))", + "expected_output": "[]" + }, + { + "test": "print(empirical_pmf([0, 0, 1, 1, 1, 2]))", + "expected_output": "[(0, 0.3333333333333333), (1, 0.5), (2, 0.16666666666666666)]" + } + ] +} \ No newline at end of file diff --git a/build/187.json b/build/187.json new file mode 100644 index 00000000..5606ff6b --- /dev/null +++ b/build/187.json @@ -0,0 +1,72 @@ +{ + "id": "187", + "title": "Probability Addition Law: Compute P(A ∪ B)", + "difficulty": "easy", + "category": "Probability & Statistics", + "video": "", + "likes": "0", + "dislikes": "0", + "contributor": [ + { + "profile_link": "https://github.com/Jeet009", + "name": "Jeet Mukherjee" + } + ], + "tinygrad_difficulty": "easy", + "pytorch_difficulty": "easy", + "description": "## Problem\n\nTwo events `A` and `B` in a probability space have the following probabilities:\n\n- P(A) = 0.6\n- P(B) = 0.5\n- P(A ∩ B) = 0.3\n\nUsing the probability addition law, compute `P(A ∪ B)`.\n\nImplement a function `prob_union(p_a, p_b, p_intersection)` that returns `P(A ∪ B)` as a float.\n\nRecall: P(A ∪ B) = P(A) + P(B) − P(A ∩ B).\n\nNote: If `A` and `B` are mutually exclusive (disjoint), then `P(A ∩ B) = 0` and the rule simplifies to `P(A ∪ B) = P(A) + P(B)`.", + "learn_section": "## Solution Explanation\n\nThe probability addition law for any two events A and B states:\n\n$$\nP(A \\cup B) = P(A) + P(B) - P(A \\cap B)\n$$\n\n- The union counts outcomes in A or B (or both).\n- We subtract the intersection once to correct double-counting.\n\n### Mutually exclusive (disjoint) events\nIf A and B cannot occur together, then \\(P(A \\cap B) = 0\\) and the addition rule simplifies to:\n\\[\nP(A \\cup B) = P(A) + P(B)\n\\]\n\n### Plug in the given values\n\nGiven: \\(P(A)=0.6\\), \\(P(B)=0.5\\), \\(P(A \\cap B)=0.3\\)\n\n\\[\nP(A \\cup B) = 0.6 + 0.5 - 0.3 = 0.8\n\\]\n\n### Validity checks\n- Probabilities must lie in [0, 1]. The result 0.8 is valid.\n- Given inputs must satisfy: \\(0 \\le P(A \\cap B) \\le \\min\\{P(A), P(B)\\}\\) and \\(P(A \\cap B) \\ge P(A) + P(B) - 1\\). 
Here, 0.3 is within [0.1, 0.5], so inputs are consistent.\n\n### Implementation outline\n- Accept three floats: `p_a`, `p_b`, `p_intersection`.\n- Optionally assert basic bounds to help users catch mistakes.\n- Return `p_a + p_b - p_intersection`.", + "starter_code": "# Implement your function below.\n\ndef prob_union(p_a: float, p_b: float, p_intersection: float) -> float:\n\t\"\"\"Return P(A ∪ B) using the addition law.\n\n\tAuto-detects mutually exclusive events by treating very small P(A ∩ B) as 0.\n\n\tArguments:\n\t- p_a: P(A)\n\t- p_b: P(B)\n\t- p_intersection: P(A ∩ B)\n\n\tReturns:\n\t- float: P(A ∪ B)\n\t\"\"\"\n\t# TODO: if p_intersection is ~0, return p_a + p_b; else return p_a + p_b - p_intersection\n\traise NotImplementedError", + "solution": "def prob_union(p_a: float, p_b: float, p_intersection: float) -> float:\n\t\"\"\"Reference implementation for P(A ∪ B) with auto-detection of mutual exclusivity.\n\n\tIf p_intersection is numerically very small (≤ 1e-12), treat as 0 and\n\tuse the simplified rule P(A ∪ B) = P(A) + P(B).\n\t\"\"\"\n\tepsilon = 1e-12\n\tif p_intersection <= epsilon:\n\t\treturn p_a + p_b\n\treturn p_a + p_b - p_intersection", + "example": { + "input": "prob_union(0.6, 0.5, 0.3)", + "output": "0.8", + "reasoning": "By addition law: 0.6 + 0.5 − 0.3 = 0.8." + }, + "test_cases": [ + { + "test": "from solution import prob_union; print(prob_union(0.6, 0.5, 0.3))", + "expected_output": "0.8" + }, + { + "test": "from solution import prob_union; print(prob_union(0.2, 0.4, 0.1))", + "expected_output": "0.5" + }, + { + "test": "from solution import prob_union; print(prob_union(0.3, 0.2, 0.0))", + "expected_output": "0.5" + } + ], + "tinygrad_starter_code": "def prob_union(p_a: float, p_b: float, p_intersection: float) -> float:\n\t\"\"\"Return P(A ∪ B). Treat very small P(A ∩ B) as 0 (mutually exclusive).\"\"\"\n\traise NotImplementedError", + "tinygrad_solution": "def prob_union(p_a: float, p_b: float, p_intersection: float) -> float:\n\t\"\"\"Reference implementation for P(A ∪ B) with auto-detection (Tinygrad track).\"\"\"\n\tepsilon = 1e-12\n\tif p_intersection <= epsilon:\n\t\treturn p_a + p_b\n\treturn p_a + p_b - p_intersection", + "tinygrad_test_cases": [ + { + "test": "from solution import prob_union; print(prob_union(0.6, 0.5, 0.3))", + "expected_output": "0.8" + }, + { + "test": "from solution import prob_union; print(prob_union(0.2, 0.4, 0.1))", + "expected_output": "0.5" + }, + { + "test": "from solution import prob_union; print(prob_union(0.3, 0.2, 0.0))", + "expected_output": "0.5" + } + ], + "pytorch_starter_code": "def prob_union(p_a: float, p_b: float, p_intersection: float) -> float:\n\t\"\"\"Return P(A ∪ B). 
Treat very small P(A ∩ B) as 0 (mutually exclusive).\"\"\"\n\traise NotImplementedError", + "pytorch_solution": "def prob_union(p_a: float, p_b: float, p_intersection: float) -> float:\n\t\"\"\"Reference implementation for P(A ∪ B) with auto-detection (PyTorch track).\"\"\"\n\tepsilon = 1e-12\n\tif p_intersection <= epsilon:\n\t\treturn p_a + p_b\n\treturn p_a + p_b - p_intersection", + "pytorch_test_cases": [ + { + "test": "from solution import prob_union; print(prob_union(0.6, 0.5, 0.3))", + "expected_output": "0.8" + }, + { + "test": "from solution import prob_union; print(prob_union(0.2, 0.4, 0.1))", + "expected_output": "0.5" + }, + { + "test": "from solution import prob_union; print(prob_union(0.3, 0.2, 0.0))", + "expected_output": "0.5" + } + ] +} \ No newline at end of file diff --git a/build/2.json b/build/2.json index 8faf4c09..e42cf6db 100644 --- a/build/2.json +++ b/build/2.json @@ -33,15 +33,15 @@ "expected_output": "[[1, 4], [2, 5], [3, 6]]" } ], - "tinygrad_starter_code": "from tinygrad.tensor import Tensor\n\ndef transpose_matrix_tg(a) -> Tensor:\n \"\"\"\n Transpose a 2D matrix `a` using tinygrad.\n Inputs can be Python lists, NumPy arrays, or tinygrad Tensors.\n Returns a transposed Tensor.\n \"\"\"\n # Convert to Tensor\n a_t = Tensor(a)\n # Your implementation here\n pass", - "tinygrad_solution": "from tinygrad.tensor import Tensor\n\ndef transpose_matrix_tg(a) -> Tensor:\n \"\"\"\n Transpose a 2D matrix `a` using tinygrad.\n Inputs can be Python lists, NumPy arrays, or tinygrad Tensors.\n Returns a transposed Tensor.\n \"\"\"\n a_t = Tensor(a)\n return a_t.transpose(0,1)", + "tinygrad_starter_code": "from tinygrad.tensor import Tensor\n\ndef transpose_matrix_tg(a:Tensor) -> Tensor:\n \"\"\"\n Transpose a 2D matrix `a` using tinygrad.\n Inputs are tinygrad Tensors.\n Returns a transposed Tensor.\n \"\"\"\n pass", + "tinygrad_solution": "from tinygrad.tensor import Tensor\n\ndef transpose_matrix_tg(a) -> Tensor:\n \"\"\"\n Transpose a 2D matrix `a` using tinygrad.\n Inputs are tinygrad Tensors.\n Returns a transposed Tensor.\n \"\"\"\n return a.T", "tinygrad_test_cases": [ { - "test": "from tinygrad.tensor import Tensor\nres = transpose_matrix_tg([[1,2,3],[4,5,6]])\nprint(res.numpy().tolist())", + "test": "from tinygrad.tensor import Tensor\nres = transpose_matrix_tg(Tensor([[1,2,3],[4,5,6]]))\nprint(res.numpy().tolist())", "expected_output": "[[1, 4], [2, 5], [3, 6]]" }, { - "test": "from tinygrad.tensor import Tensor\nres = transpose_matrix_tg([[1,2],[3,4]])\nprint(res.numpy().tolist())", + "test": "from tinygrad.tensor import Tensor\nres = transpose_matrix_tg(Tensor([[1,2],[3,4]]))\nprint(res.numpy().tolist())", "expected_output": "[[1, 3], [2, 4]]" } ], diff --git a/build/3.json b/build/3.json index 5aecd531..366f7b2a 100644 --- a/build/3.json +++ b/build/3.json @@ -46,15 +46,15 @@ "expected_output": "[[1, 2, 3, 4], [5, 6, 7, 8]]" } ], - "tinygrad_starter_code": "from tinygrad.tensor import Tensor\n\ndef reshape_matrix_tg(a, new_shape) -> Tensor:\n \"\"\"\n Reshape a 2D matrix `a` to shape `new_shape` using tinygrad.\n Inputs can be Python lists, NumPy arrays, or tinygrad Tensors.\n Returns a Tensor of shape `new_shape`, or an empty Tensor on mismatch.\n \"\"\"\n # Dimension check\n if len(a) * len(a[0]) != new_shape[0] * new_shape[1]:\n return Tensor([])\n # Convert to Tensor and reshape\n a_t = Tensor(a)\n # Your implementation here\n pass", - "tinygrad_solution": "from tinygrad.tensor import Tensor\n\ndef reshape_matrix_tg(a, new_shape) -> Tensor:\n \"\"\"\n 
Reshape a 2D matrix `a` to shape `new_shape` using tinygrad.\n Inputs can be Python lists, NumPy arrays, or tinygrad Tensors.\n Returns a Tensor of shape `new_shape`, or an empty Tensor on mismatch.\n \"\"\"\n # Dimension check\n if len(a) * len(a[0]) != new_shape[0] * new_shape[1]:\n return Tensor([])\n a_t = Tensor(a)\n return a_t.reshape(new_shape)", + "tinygrad_starter_code": "from tinygrad.tensor import Tensor\n\ndef reshape_matrix_tg(a:Tensor, new_shape:tuple) -> Tensor:\n \"\"\"\n Reshape a 2D matrix `a` to shape `new_shape` using tinygrad.\n Inputs are tinygrad Tensors.\n Returns a Tensor of shape `new_shape`, or an empty Tensor on mismatch.\n \"\"\"\n pass", + "tinygrad_solution": "from tinygrad.tensor import Tensor\n\ndef reshape_matrix_tg(a, new_shape) -> Tensor:\n \"\"\"\n Reshape a 2D matrix `a` to shape `new_shape` using tinygrad.\n Inputs are tinygrad Tensors.\n Returns a Tensor of shape `new_shape`, or an empty Tensor on mismatch.\n \"\"\"\n # Dimension check\n if len(a) * len(a[0]) != new_shape[0] * new_shape[1]:\n return Tensor([])\n return a.reshape(new_shape)", "tinygrad_test_cases": [ { - "test": "from tinygrad.tensor import Tensor\nres = reshape_matrix_tg(\n [[1,2,3],[4,5,6]],\n (3, 2)\n)\nprint(res.numpy().tolist())", + "test": "from tinygrad.tensor import Tensor\nres = reshape_matrix_tg(\n Tensor([[1,2,3],[4,5,6]]),\n (3, 2)\n)\nprint(res.numpy().tolist())", "expected_output": "[[1, 2], [3, 4], [5, 6]]" }, { - "test": "from tinygrad.tensor import Tensor\nres = reshape_matrix_tg(\n [[1,2],[3,4]],\n (3, 2)\n)\nprint(res.numpy().tolist())", + "test": "from tinygrad.tensor import Tensor\nres = reshape_matrix_tg(\n Tensor([[1,2],[3,4]]),\n (3, 2)\n)\nprint(res.numpy().tolist())", "expected_output": "[]" } ], diff --git a/questions/187_probability-addition-law/description.md b/questions/187_probability-addition-law/description.md new file mode 100644 index 00000000..73d60706 --- /dev/null +++ b/questions/187_probability-addition-law/description.md @@ -0,0 +1,15 @@ +## Problem + +Two events `A` and `B` in a probability space have the following probabilities: + +- P(A) = 0.6 +- P(B) = 0.5 +- P(A ∩ B) = 0.3 + +Using the probability addition law, compute `P(A ∪ B)`. + +Implement a function `prob_union(p_a, p_b, p_intersection)` that returns `P(A ∪ B)` as a float. + +Recall: P(A ∪ B) = P(A) + P(B) − P(A ∩ B). + +Note: If `A` and `B` are mutually exclusive (disjoint), then `P(A ∩ B) = 0` and the rule simplifies to `P(A ∪ B) = P(A) + P(B)`. diff --git a/questions/187_probability-addition-law/example.json b/questions/187_probability-addition-law/example.json new file mode 100644 index 00000000..7ddce763 --- /dev/null +++ b/questions/187_probability-addition-law/example.json @@ -0,0 +1,5 @@ +{ + "input": "prob_union(0.6, 0.5, 0.3)", + "output": "0.8", + "reasoning": "By addition law: 0.6 + 0.5 − 0.3 = 0.8." +} diff --git a/questions/187_probability-addition-law/learn.md b/questions/187_probability-addition-law/learn.md new file mode 100644 index 00000000..d1038b38 --- /dev/null +++ b/questions/187_probability-addition-law/learn.md @@ -0,0 +1,33 @@ +## Solution Explanation + +The probability addition law for any two events A and B states: + +$$ +P(A \cup B) = P(A) + P(B) - P(A \cap B) +$$ + +- The union counts outcomes in A or B (or both). +- We subtract the intersection once to correct double-counting. 
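+
+For intuition, take one roll of a fair die with A = "even" = {2, 4, 6} and B = "greater than 3" = {4, 5, 6}: P(A) = P(B) = 1/2 and P(A ∩ B) = P({4, 6}) = 1/3, so P(A ∪ B) = 1/2 + 1/2 - 1/3 = 5/6, which matches the five outcomes {2, 3, 4, 5, 6}. Adding P(A) and P(B) alone would count 4 and 6 twice.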
+ +### Mutually exclusive (disjoint) events +If A and B cannot occur together, then \(P(A \cap B) = 0\) and the addition rule simplifies to: +\[ +P(A \cup B) = P(A) + P(B) +\] + +### Plug in the given values + +Given: \(P(A)=0.6\), \(P(B)=0.5\), \(P(A \cap B)=0.3\) + +\[ +P(A \cup B) = 0.6 + 0.5 - 0.3 = 0.8 +\] + +### Validity checks +- Probabilities must lie in [0, 1]. The result 0.8 is valid. +- Given inputs must satisfy: \(0 \le P(A \cap B) \le \min\{P(A), P(B)\}\) and \(P(A \cap B) \ge P(A) + P(B) - 1\). Here, 0.3 is within [0.1, 0.5], so inputs are consistent. + +### Implementation outline +- Accept three floats: `p_a`, `p_b`, `p_intersection`. +- Optionally assert basic bounds to help users catch mistakes. +- Return `p_a + p_b - p_intersection`. diff --git a/questions/187_probability-addition-law/meta.json b/questions/187_probability-addition-law/meta.json new file mode 100644 index 00000000..58f1fd53 --- /dev/null +++ b/questions/187_probability-addition-law/meta.json @@ -0,0 +1,15 @@ +{ + "id": "187", + "title": "Probability Addition Law: Compute P(A ∪ B)", + "difficulty": "easy", + "category": "Probability", + "video": "", + "likes": "0", + "dislikes": "0", + "contributor": [ + { + "profile_link": "https://github.com/Jeet009", + "name": "Jeet Mukherjee" + } + ] +} diff --git a/questions/187_probability-addition-law/solution.py b/questions/187_probability-addition-law/solution.py new file mode 100644 index 00000000..c49aeaf7 --- /dev/null +++ b/questions/187_probability-addition-law/solution.py @@ -0,0 +1,10 @@ +def prob_union(p_a: float, p_b: float, p_intersection: float) -> float: + """Reference implementation for P(A ∪ B) with auto-detection of mutual exclusivity. + + If p_intersection is numerically very small (≤ 1e-12), treat as 0 and + use the simplified rule P(A ∪ B) = P(A) + P(B). + """ + epsilon = 1e-12 + if p_intersection <= epsilon: + return p_a + p_b + return p_a + p_b - p_intersection diff --git a/questions/187_probability-addition-law/starter_code.py b/questions/187_probability-addition-law/starter_code.py new file mode 100644 index 00000000..e4f8a1de --- /dev/null +++ b/questions/187_probability-addition-law/starter_code.py @@ -0,0 +1,17 @@ +# Implement your function below. + +def prob_union(p_a: float, p_b: float, p_intersection: float) -> float: + """Return P(A ∪ B) using the addition law. + + Auto-detects mutually exclusive events by treating very small P(A ∩ B) as 0. + + Arguments: + - p_a: P(A) + - p_b: P(B) + - p_intersection: P(A ∩ B) + + Returns: + - float: P(A ∪ B) + """ + # TODO: if p_intersection is ~0, return p_a + p_b; else return p_a + p_b - p_intersection + raise NotImplementedError diff --git a/questions/187_probability-addition-law/tests.json b/questions/187_probability-addition-law/tests.json new file mode 100644 index 00000000..61e25f39 --- /dev/null +++ b/questions/187_probability-addition-law/tests.json @@ -0,0 +1,5 @@ +[ + { "test": "from solution import prob_union; print(prob_union(0.6, 0.5, 0.3))", "expected_output": "0.8" }, + { "test": "from solution import prob_union; print(prob_union(0.2, 0.4, 0.1))", "expected_output": "0.5" }, + { "test": "from solution import prob_union; print(prob_union(0.3, 0.2, 0.0))", "expected_output": "0.5" } +]