| { | |
| "title": "Generative Models Mastery: 100 MCQs", | |
| "description": "A complete 100-question set covering fundamental concepts, algorithms, architectures, optimization techniques, and applications of Generative Models.", | |
| "questions": [ | |
| { | |
| "id": 1, | |
| "questionText": "What is the primary goal of a generative model?", | |
| "options": [ | |
| "To classify input data into categories", | |
| "To cluster data points", | |
| "To reduce dimensionality of data", | |
| "To generate new data samples similar to the training data" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Generative models aim to learn the underlying distribution of the data and generate new samples that resemble the training data." | |
| }, | |
| { | |
| "id": 2, | |
| "questionText": "Which of the following is a type of generative model?", | |
| "options": [ | |
| "Random Forest", | |
| "K-Means Clustering", | |
| "Variational Autoencoder (VAE)", | |
| "Support Vector Machine (SVM)" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "VAEs are generative models that learn a latent representation and can generate new data samples." | |
| }, | |
| { | |
| "id": 3, | |
| "questionText": "In generative modeling, what does the term 'latent space' refer to?", | |
| "options": [ | |
| "The output prediction space", | |
| "A lower-dimensional representation capturing the underlying factors of variation", | |
| "A space for storing training labels", | |
| "The input feature space" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Latent space encodes the hidden factors that capture important structure in the data, which can be used for generation." | |
| }, | |
| { | |
| "id": 4, | |
| "questionText": "Which of the following models uses a game-theoretic approach to generate data?", | |
| "options": [ | |
| "Naive Bayes", | |
| "Variational Autoencoder (VAE)", | |
| "Generative Adversarial Network (GAN)", | |
| "Principal Component Analysis (PCA)" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "GANs consist of a generator and a discriminator competing in a minimax game to generate realistic samples." | |
| }, | |
| { | |
| "id": 5, | |
| "questionText": "What distinguishes a generative model from a discriminative model?", | |
| "options": [ | |
| "Generative models learn the data distribution, discriminative models learn decision boundaries", | |
| "Discriminative models are always unsupervised", | |
| "Generative models only classify data, discriminative models generate data", | |
| "Generative models cannot be probabilistic, discriminative models can" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Generative models learn P(x) or P(x, y), whereas discriminative models learn P(y|x) to classify data." | |
| }, | |
| { | |
| "id": 6, | |
| "questionText": "Which of the following is a probabilistic generative model?", | |
| "options": [ | |
| "Naive Bayes", | |
| "Decision Tree", | |
| "SVM", | |
| "K-Nearest Neighbors" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Naive Bayes models the joint probability of features and class labels, making it a probabilistic generative model." | |
| }, | |
| { | |
| "id": 7, | |
| "questionText": "What is a key application of generative models in computer vision?", | |
| "options": [ | |
| "Color quantization", | |
| "Edge detection", | |
| "Image synthesis and inpainting", | |
| "Classification of handwritten digits" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Generative models can produce realistic images, fill missing parts, or create new images from learned distributions." | |
| }, | |
| { | |
| "id": 8, | |
| "questionText": "Which loss function is commonly used in Variational Autoencoders (VAE)?", | |
| "options": [ | |
| "Mean squared error only", | |
| "Hinge loss", | |
| "Reconstruction loss + KL divergence", | |
| "Cross-entropy loss" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "VAE optimizes reconstruction loss to reconstruct input data and KL divergence to regularize the latent space." | |
| }, | |
| { | |
| "id": 9, | |
| "questionText": "In GANs, what is the role of the discriminator?", | |
| "options": [ | |
| "To calculate reconstruction error", | |
| "To generate new data samples", | |
| "To distinguish real data from generated data", | |
| "To compress data into latent space" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "The discriminator evaluates whether a given sample is real or generated, providing feedback to the generator." | |
| }, | |
| { | |
| "id": 10, | |
| "questionText": "What is a common challenge in training GANs?", | |
| "options": [ | |
| "Vanishing gradient in VAE encoder", | |
| "Mode collapse", | |
| "Lack of reconstruction loss", | |
| "Overfitting on the latent space" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Mode collapse occurs when the generator produces limited variety, failing to cover the full data distribution." | |
| }, | |
| { | |
| "id": 11, | |
| "questionText": "Which model can both encode and generate data samples?", | |
| "options": [ | |
| "K-Means", | |
| "SVM", | |
| "Decision Tree", | |
| "Autoencoder" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Autoencoders compress data into a latent space (encoding) and reconstruct it (decoding), enabling generation." | |
| }, | |
| { | |
| "id": 12, | |
| "questionText": "In a VAE, why is the latent space regularized?", | |
| "options": [ | |
| "To ensure maximum likelihood estimation", | |
| "To allow smooth sampling and meaningful interpolation", | |
| "To prevent overfitting on labels", | |
| "To increase reconstruction error" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Regularizing the latent space with KL divergence ensures that points sampled from the prior produce realistic outputs." | |
| }, | |
| { | |
| "id": 13, | |
| "questionText": "Which generative model is non-probabilistic?", | |
| "options": [ | |
| "Hidden Markov Model", | |
| "Gaussian Mixture Model", | |
| "Variational Autoencoder (VAE)", | |
| "Vanilla Autoencoder" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Vanilla autoencoders learn deterministic mappings and do not model probability distributions explicitly." | |
| }, | |
| { | |
| "id": 14, | |
| "questionText": "Which of the following is an example of a sequential generative model?", | |
| "options": [ | |
| "Decision Tree", | |
| "Convolutional Neural Network (CNN)", | |
| "K-Means", | |
| "Recurrent Neural Network (RNN) based language models" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "RNN-based models can generate sequences like text or music by learning sequential dependencies." | |
| }, | |
| { | |
| "id": 15, | |
| "questionText": "Which generative model explicitly models the joint probability distribution of the data?", | |
| "options": [ | |
| "K-Nearest Neighbors", | |
| "Feedforward Neural Network Classifier", | |
| "Gaussian Mixture Model (GMM)", | |
| "PCA" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "GMM models P(x) as a mixture of Gaussian distributions, capturing the underlying data distribution." | |
| }, | |
| { | |
| "id": 16, | |
| "questionText": "In GAN training, what does the generator aim to maximize?", | |
| "options": [ | |
| "The KL divergence", | |
| "The reconstruction loss", | |
| "The classification accuracy", | |
| "The probability of the discriminator being mistaken" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "The generator tries to produce samples that fool the discriminator into classifying them as real." | |
| }, | |
| { | |
| "id": 17, | |
| "questionText": "What is a key difference between VAE and GAN?", | |
| "options": [ | |
| "GAN cannot generate images", | |
| "VAE is probabilistic and uses reconstruction loss; GAN uses adversarial loss", | |
| "Both are deterministic autoencoders", | |
| "VAE uses adversarial loss; GAN uses reconstruction loss" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "VAE models a probabilistic latent space and reconstruction loss, while GANs use a generator-discriminator game with adversarial loss." | |
| }, | |
| { | |
| "id": 18, | |
| "questionText": "Which type of generative model is suitable for clustering mixed continuous and categorical data?", | |
| "options": [ | |
| "RNN", | |
| "Gaussian Mixture Model (GMM)", | |
| "SVM", | |
| "Convolutional Autoencoder" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "GMMs can model continuous data probabilistically, and extensions exist for mixed data types." | |
| }, | |
| { | |
| "id": 19, | |
| "questionText": "What is the primary evaluation metric for generative models in image synthesis?", | |
| "options": [ | |
| "Classification accuracy", | |
| "Mean squared error on labels", | |
| "Confusion matrix", | |
| "Inception Score (IS) or FID" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "IS and FID measure the quality and diversity of generated images compared to real data." | |
| }, | |
| { | |
| "id": 20, | |
| "questionText": "Which of the following can generative models be used for in NLP?", | |
| "options": [ | |
| "Word classification only", | |
| "Sentence segmentation", | |
| "Text generation and language modeling", | |
| "Named entity recognition exclusively" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Generative models like RNNs or Transformers can generate coherent text sequences or predict next words." | |
| }, | |
| { | |
| "id": 21, | |
| "questionText": "Which approach is commonly used to stabilize GAN training?", | |
| "options": [ | |
| "Using deterministic latent space", | |
| "Increasing KL divergence weight", | |
| "Removing the generator", | |
| "Label smoothing and batch normalization" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Label smoothing and normalization techniques help prevent instability in the generator-discriminator game." | |
| }, | |
| { | |
| "id": 22, | |
| "questionText": "Which generative model can model complex, multi-modal distributions explicitly?", | |
| "options": [ | |
| "Linear Regression", | |
| "Standard Autoencoder", | |
| "Decision Tree", | |
| "Normalizing Flows" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Normalizing flows model complex distributions by transforming a simple base distribution via invertible functions." | |
| }, | |
| { | |
| "id": 23, | |
| "questionText": "Which of these models learns by minimizing divergence between true data distribution and model distribution?", | |
| "options": [ | |
| "Variational Autoencoder", | |
| "K-Means", | |
| "Random Forest", | |
| "Decision Tree" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "VAE minimizes reconstruction loss plus KL divergence, aligning the latent distribution with a prior." | |
| }, | |
| { | |
| "id": 24, | |
| "questionText": "Which type of generative model is based on a chain of conditional probabilities?", | |
| "options": [ | |
| "GANs", | |
| "Autoregressive models", | |
| "Feedforward Neural Networks", | |
| "VAEs" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Autoregressive models predict each element conditioned on previous elements, modeling the joint distribution sequentially." | |
| }, | |
| { | |
| "id": 25, | |
| "questionText": "What is a limitation of simple autoencoders as generative models?", | |
| "options": [ | |
| "They are deterministic and cannot sample new points smoothly", | |
| "They overfit the discriminator", | |
| "They cannot reduce dimensionality", | |
| "They require adversarial loss" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Simple autoencoders do not model probability distributions, so sampling new latent points may not generate realistic data." | |
| }, | |
| { | |
| "id": 26, | |
| "questionText": "Which of the following is a key property of a probabilistic generative model?", | |
| "options": [ | |
| "It performs clustering only", | |
| "It estimates P(x) or P(x, y)", | |
| "It maximizes classification accuracy", | |
| "It does not use probability distributions" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Probabilistic generative models explicitly model probability distributions of data or data-label pairs." | |
| }, | |
| { | |
| "id": 27, | |
| "questionText": "In conditional GANs (cGANs), what is provided to the generator additionally?", | |
| "options": [ | |
| "Only random noise", | |
| "The discriminator's parameters", | |
| "Conditioning information such as class labels", | |
| "Reconstruction loss" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "cGANs use additional conditioning variables to control the type of data generated, e.g., generating specific class images." | |
| }, | |
| { | |
| "id": 28, | |
| "questionText": "Which generative model is best for sequence-to-sequence data?", | |
| "options": [ | |
| "Autoencoders without temporal structure", | |
| "Gaussian Mixture Models", | |
| "CNNs only", | |
| "RNN-based or Transformer models" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "RNNs or Transformers can handle sequential dependencies, making them suitable for generating sequences." | |
| }, | |
| { | |
| "id": 29, | |
| "questionText": "Which of the following is a key challenge in training VAEs?", | |
| "options": [ | |
| "Mode collapse", | |
| "Label smoothing", | |
| "Vanishing discriminator gradient", | |
| "Balancing reconstruction loss and KL divergence" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "VAEs need to trade off reconstruction accuracy with latent space regularization using KL divergence." | |
| }, | |
| { | |
| "id": 30, | |
| "questionText": "Which scenario demonstrates the use of generative models in practice?", | |
| "options": [ | |
| "Generating realistic human faces from learned distributions", | |
| "Sorting a list of numbers", | |
| "Clustering sensor data without generation", | |
| "Computing shortest path in a graph" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Generative models can synthesize new data samples, e.g., realistic faces, by learning underlying distributions." | |
| }, | |
| { | |
| "id": 31, | |
| "questionText": "Which loss function is typically used in GANs?", | |
| "options": [ | |
| "Mean squared error", | |
| "Reconstruction loss only", | |
| "KL divergence only", | |
| "Adversarial loss (minimax)" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "GANs are trained using adversarial loss in a minimax game between generator and discriminator." | |
| }, | |
| { | |
| "id": 32, | |
| "questionText": "In a VAE, what is the purpose of the reparameterization trick?", | |
| "options": [ | |
| "To reduce mode collapse", | |
| "To normalize input images", | |
| "To improve discriminator accuracy", | |
| "To allow backpropagation through stochastic sampling" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "The reparameterization trick expresses the sampled latent variable as a differentiable function, enabling gradient-based optimization." | |
| }, | |
| { | |
| "id": 33, | |
| "questionText": "Which type of GAN explicitly conditions on auxiliary information?", | |
| "options": [ | |
| "Wasserstein GAN", | |
| "Vanilla GAN", | |
| "Conditional GAN (cGAN)", | |
| "DCGAN" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "cGANs incorporate additional conditioning variables, such as class labels, to control generation." | |
| }, | |
| { | |
| "id": 34, | |
| "questionText": "What is the main purpose of a discriminator in a GAN?", | |
| "options": [ | |
| "To cluster data points", | |
| "To reconstruct input data", | |
| "To encode input into latent space", | |
| "To distinguish real data from generated data" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "The discriminator evaluates the authenticity of samples, guiding the generator to produce realistic outputs." | |
| }, | |
| { | |
| "id": 35, | |
| "questionText": "Which architecture is commonly used for image generation in GANs?", | |
| "options": [ | |
| "Recurrent layers only", | |
| "SVM classifier", | |
| "Convolutional layers (DCGAN)", | |
| "Fully connected only" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "DCGANs leverage convolutional layers to capture spatial hierarchies for high-quality image generation." | |
| }, | |
| { | |
| "id": 36, | |
| "questionText": "What is mode collapse in GANs?", | |
| "options": [ | |
| "When latent space is regularized", | |
| "When reconstruction error increases", | |
| "When discriminator overfits", | |
| "When the generator produces limited variety of outputs" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Mode collapse occurs when the generator maps different latent vectors to similar outputs, reducing diversity." | |
| }, | |
| { | |
| "id": 37, | |
| "questionText": "Which generative model is based on sequential factorization of joint probability?", | |
| "options": [ | |
| "PCA", | |
| "VAEs", | |
| "Autoregressive models", | |
| "GANs" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Autoregressive models predict each variable conditioned on previous ones, effectively factorizing P(x) sequentially." | |
| }, | |
| { | |
| "id": 38, | |
| "questionText": "Which of the following is a key advantage of Normalizing Flows?", | |
| "options": [ | |
| "Works only for discrete data", | |
| "No need for latent space regularization", | |
| "Automatic mode collapse prevention", | |
| "Exact likelihood computation and invertibility" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Normalizing flows provide invertible mappings from latent to data space, allowing exact likelihood evaluation." | |
| }, | |
| { | |
| "id": 39, | |
| "questionText": "Which metric evaluates both quality and diversity of generated images?", | |
| "options": [ | |
| "KL divergence only", | |
| "Mean squared error", | |
| "Fréchet Inception Distance (FID)", | |
| "Cross-entropy loss" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "FID compares statistics of generated and real images to assess quality and diversity." | |
| }, | |
| { | |
| "id": 40, | |
| "questionText": "In a GAN, what does the generator network learn?", | |
| "options": [ | |
| "To map latent vectors to realistic samples", | |
| "To encode samples into latent vectors", | |
| "To classify images into categories", | |
| "To minimize KL divergence only" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "The generator transforms random noise (latent vectors) into data samples resembling the true distribution." | |
| }, | |
| { | |
| "id": 41, | |
| "questionText": "Which technique can stabilize GAN training?", | |
| "options": [ | |
| "Maximizing reconstruction error", | |
| "Wasserstein loss with gradient penalty", | |
| "Reducing latent vector size to 1", | |
| "Removing the discriminator" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Wasserstein GAN (WGAN) loss with gradient penalty improves convergence and reduces mode collapse." | |
| }, | |
| { | |
| "id": 42, | |
| "questionText": "Which generative model uses latent variables to represent data probabilistically?", | |
| "options": [ | |
| "Autoregressive model", | |
| "Decision Tree", | |
| "Variational Autoencoder (VAE)", | |
| "GAN" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "VAE learns a probabilistic latent space with parameters (mean and variance) to generate samples." | |
| }, | |
| { | |
| "id": 43, | |
| "questionText": "Which generative model is particularly suitable for text generation?", | |
| "options": [ | |
| "DCGAN", | |
| "GMM", | |
| "Convolutional Autoencoder", | |
| "RNN-based or Transformer-based models" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Sequential models like RNNs and Transformers capture temporal dependencies in text." | |
| }, | |
| { | |
| "id": 44, | |
| "questionText": "What is the main difference between explicit and implicit generative models?", | |
| "options": [ | |
| "Implicit models cannot generate samples", | |
| "Implicit models compute exact likelihood; explicit models approximate it", | |
| "Explicit models estimate data likelihood; implicit models do not", | |
| "Explicit models always use GANs; implicit models use VAEs" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Explicit models model the probability distribution (e.g., VAE, Normalizing Flows), whereas implicit models (GANs) learn to sample without computing likelihood." | |
| }, | |
| { | |
| "id": 45, | |
| "questionText": "Which model is most suitable for generating high-resolution images?", | |
| "options": [ | |
| "Progressive GAN or StyleGAN", | |
| "Vanilla Autoencoder", | |
| "Gaussian Mixture Model", | |
| "RNN" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Progressive GANs and StyleGANs can synthesize high-resolution images by progressively increasing image size during training." | |
| }, | |
| { | |
| "id": 46, | |
| "questionText": "Which of the following is an autoregressive generative model?", | |
| "options": [ | |
| "K-Means", | |
| "PixelRNN or PixelCNN", | |
| "GAN", | |
| "VAE" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "PixelRNN/CNN model images pixel by pixel, conditioning each on previous pixels." | |
| }, | |
| { | |
| "id": 47, | |
| "questionText": "Which of the following is a limitation of VAEs?", | |
| "options": [ | |
| "Cannot encode data", | |
| "Generated samples may be blurry", | |
| "Cannot model probability distributions", | |
| "Require adversarial loss" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "VAEs optimize a trade-off between reconstruction and regularization; this can result in less sharp images compared to GANs." | |
| }, | |
| { | |
| "id": 48, | |
| "questionText": "Which type of generative model is most suitable for density estimation?", | |
| "options": [ | |
| "Normalizing Flows", | |
| "GANs", | |
| "RNN for sequence generation", | |
| "Vanilla Autoencoders" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Normalizing flows allow exact likelihood computation, making them ideal for density estimation." | |
| }, | |
| { | |
| "id": 49, | |
| "questionText": "Which technique improves diversity of generated samples in GANs?", | |
| "options": [ | |
| "Minibatch discrimination", | |
| "Removing the discriminator", | |
| "Increasing KL divergence only", | |
| "Reducing latent space size" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Minibatch discrimination introduces dependencies between samples to prevent mode collapse." | |
| }, | |
| { | |
| "id": 50, | |
| "questionText": "Which loss is used in Wasserstein GANs (WGAN)?", | |
| "options": [ | |
| "Mean squared error", | |
| "Cross-entropy loss", | |
| "Earth-Mover (Wasserstein) distance", | |
| "KL divergence only" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "WGAN minimizes the Wasserstein distance between real and generated distributions for better training stability." | |
| }, | |
| { | |
| "id": 51, | |
| "questionText": "Which model learns a mapping from a simple distribution to a complex distribution using invertible functions?", | |
| "options": [ | |
| "Normalizing Flows", | |
| "Autoregressive model", | |
| "GAN", | |
| "VAE" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Normalizing flows use invertible transformations to map simple distributions (like Gaussian) to complex target distributions." | |
| }, | |
| { | |
| "id": 52, | |
| "questionText": "Which approach allows VAEs to generate smooth interpolations between samples?", | |
| "options": [ | |
| "Sequential sampling without regularization", | |
| "Random noise addition", | |
| "Regularized latent space with Gaussian prior", | |
| "Discriminator feedback" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "A regularized latent space ensures nearby points correspond to similar outputs, enabling smooth interpolation." | |
| }, | |
| { | |
| "id": 53, | |
| "questionText": "Which generative model can perform style transfer effectively?", | |
| "options": [ | |
| "RNN", | |
| "GANs (e.g., CycleGAN)", | |
| "Normalizing Flows", | |
| "Vanilla Autoencoders" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "CycleGAN and other GAN variants can transfer style between domains without paired data." | |
| }, | |
| { | |
| "id": 54, | |
| "questionText": "Which metric is used to compare distributions of generated and real data?", | |
| "options": [ | |
| "MSE", | |
| "KL divergence or JS divergence", | |
| "Accuracy", | |
| "F1-score" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "KL and JS divergence measure how similar the generated distribution is to the true distribution." | |
| }, | |
| { | |
| "id": 55, | |
| "questionText": "Which technique improves training of deep GANs for image synthesis?", | |
| "options": [ | |
| "Removing convolutional layers", | |
| "Progressive growing of generator and discriminator", | |
| "Reducing latent space dimension to 1", | |
| "Only using MSE loss" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Progressively increasing image resolution during training stabilizes GANs and produces high-quality images." | |
| }, | |
| { | |
| "id": 56, | |
| "questionText": "Which generative model is suitable for multi-modal outputs?", | |
| "options": [ | |
| "Linear regression", | |
| "Mixture density networks or VAEs with flexible priors", | |
| "K-Means", | |
| "Decision tree" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Flexible priors or mixture models allow generating diverse outputs representing multiple modes in the data." | |
| }, | |
| { | |
| "id": 57, | |
| "questionText": "Which of the following can be used to improve latent space disentanglement in VAEs?", | |
| "options": [ | |
| "Using only adversarial loss", | |
| "Increasing discriminator size", | |
| "Removing the encoder", | |
| "β-VAE with adjustable KL weight" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "β-VAE introduces a weight on KL divergence to encourage disentangled latent representations." | |
| }, | |
| { | |
| "id": 58, | |
| "questionText": "Which GAN variant is designed to reduce mode collapse?", | |
| "options": [ | |
| "VAE", | |
| "Vanilla GAN", | |
| "Autoregressive GAN", | |
| "Unrolled GAN" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Unrolled GAN simulates discriminator updates ahead of time to prevent mode collapse." | |
| }, | |
| { | |
| "id": 59, | |
| "questionText": "Which generative model is best for audio waveform synthesis?", | |
| "options": [ | |
| "Vanilla Autoencoder", | |
| "WaveNet (autoregressive model)", | |
| "GMM", | |
| "DCGAN" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "WaveNet uses autoregressive convolutions to generate realistic audio waveforms." | |
| }, | |
| { | |
| "id": 60, | |
| "questionText": "Which model is best for text-to-image generation?", | |
| "options": [ | |
| "PixelCNN", | |
| "RNN only", | |
| "Conditional GANs or Diffusion Models", | |
| "Standard VAE" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Conditional GANs and diffusion-based models can generate images conditioned on text descriptions." | |
| }, | |
| { | |
| "id": 61, | |
| "questionText": "Which of the following is a major challenge in generative modeling?", | |
| "options": [ | |
| "Reducing reconstruction only", | |
| "Maximizing classification accuracy", | |
| "Balancing diversity and quality of generated samples", | |
| "Minimizing clustering error" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Generative models must produce realistic and diverse samples, which is often challenging to balance." | |
| }, | |
| { | |
| "id": 62, | |
| "questionText": "Which of the following models can model conditional distributions directly?", | |
| "options": [ | |
| "Vanilla Autoencoder", | |
| "Conditional VAE or cGAN", | |
| "PCA", | |
| "Unsupervised GAN without labels" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Conditional generative models can generate samples based on input conditions like labels or attributes." | |
| }, | |
| { | |
| "id": 63, | |
| "questionText": "Which generative model is capable of exact likelihood evaluation?", | |
| "options": [ | |
| "RNN language model", | |
| "GANs", | |
| "Vanilla Autoencoders", | |
| "Normalizing Flows" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Invertible transformations in Normalizing Flows allow computing the exact probability of generated samples." | |
| }, | |
| { | |
| "id": 64, | |
| "questionText": "Which technique improves GAN convergence?", | |
| "options": [ | |
| "Reducing latent dimension to 1", | |
| "Removing batch normalization", | |
| "Using only fully connected layers", | |
| "Spectral normalization of discriminator weights" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Spectral normalization stabilizes discriminator updates and prevents gradient explosion." | |
| }, | |
| { | |
| "id": 65, | |
| "questionText": "Which generative model is suitable for semi-supervised learning?", | |
| "options": [ | |
| "Vanilla Autoencoder", | |
| "RNN autoregressive model", | |
| "Normalizing Flow", | |
| "GANs with auxiliary classifier (AC-GAN)" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "AC-GAN incorporates labels in training, enabling semi-supervised learning by generating labeled data." | |
| }, | |
| { | |
| "id": 66, | |
| "questionText": "Which generative model can combine multiple modalities (e.g., text and image)?", | |
| "options": [ | |
| "PixelCNN", | |
| "Multimodal VAEs or GANs", | |
| "Standard Autoencoder", | |
| "RNN language model" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Multimodal generative models can generate data conditioned on multiple input types." | |
| }, | |
| { | |
| "id": 67, | |
| "questionText": "Which GAN variant improves gradient flow and reduces training instability?", | |
| "options": [ | |
| "Vanilla GAN without batch normalization", | |
| "Conditional GAN without discriminator", | |
| "Wasserstein GAN with gradient penalty", | |
| "VAE with KL divergence only" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "WGAN-GP ensures smoother gradient updates, improving training stability." | |
| }, | |
| { | |
| "id": 68, | |
| "questionText": "Which evaluation metric measures similarity of feature distributions between real and generated images?", | |
| "options": [ | |
| "Accuracy", | |
| "KL divergence only", | |
| "MSE", | |
| "Fréchet Inception Distance (FID)" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "FID computes the distance between feature distributions of real and generated images, assessing both quality and diversity." | |
| }, | |
| { | |
| "id": 69, | |
| "questionText": "Which model is most suitable for density estimation in high-dimensional continuous data?", | |
| "options": [ | |
| "Standard Autoencoders", | |
| "GANs", | |
| "Normalizing Flows", | |
| "PixelCNN" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Normalizing flows can handle high-dimensional continuous data with exact likelihood computation." | |
| }, | |
| { | |
| "id": 70, | |
| "questionText": "Which technique can improve VAE image sharpness?", | |
| "options": [ | |
| "Removing decoder", | |
| "Using fully connected layers only", | |
| "Reducing KL divergence weight to zero", | |
| "Combining VAE with GAN (VAE-GAN)" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "VAE-GAN combines reconstruction with adversarial loss, producing sharper and more realistic images." | |
| }, | |
| { | |
| "id": 71, | |
| "questionText": "You are training a GAN for high-resolution image generation, but the generator produces blurry outputs. What is the most likely cause?", | |
| "options": [ | |
| "Training data is too small for a VAE", | |
| "Mode collapse in the discriminator", | |
| "Latent space regularization is too strong", | |
| "The model architecture or loss function is not suitable for high-resolution outputs" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Blurry outputs often result from inadequate generator architecture or loss function for high-resolution images. Solutions include using DCGAN, Progressive GAN, or VAE-GAN architectures." | |
| }, | |
| { | |
| "id": 72, | |
| "questionText": "During VAE training, the KL divergence term dominates the reconstruction loss. What effect does this have?", | |
| "options": [ | |
| "Causes mode collapse in the generator", | |
| "Reduces latent space smoothness", | |
| "The model may produce outputs similar to the prior but poorly reconstruct inputs", | |
| "Improves image sharpness" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Excessive KL weight forces latent variables to match the prior, reducing reconstruction fidelity." | |
| }, | |
| { | |
| "id": 73, | |
| "questionText": "You are using a conditional GAN to generate labeled images. The generator only produces images of a single class. What is happening?", | |
| "options": [ | |
| "Underfitting of VAE", | |
| "Overfitting of discriminator", | |
| "Latent space regularization", | |
| "Mode collapse" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "The generator collapses to producing limited outputs, ignoring class conditioning, which is classic mode collapse." | |
| }, | |
| { | |
| "id": 74, | |
| "questionText": "You want to generate diverse text sequences. Which generative model is most appropriate?", | |
| "options": [ | |
| "Gaussian Mixture Model", | |
| "Transformer-based autoregressive model", | |
| "DCGAN", | |
| "Vanilla Autoencoder" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Transformers model sequence dependencies well and can generate diverse, coherent text sequences." | |
| }, | |
| { | |
| "id": 75, | |
| "questionText": "You need exact likelihood evaluation for high-dimensional continuous data. Which model should you choose?", | |
| "options": [ | |
| "RNN autoregressive model", | |
| "GAN", | |
| "Normalizing Flows", | |
| "VAE" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Normalizing flows provide invertible mappings allowing exact likelihood computation, suitable for density estimation." | |
| }, | |
| { | |
| "id": 76, | |
| "questionText": "You want to combine VAE reconstruction with realistic image quality. Which approach is best?", | |
| "options": [ | |
| "PixelCNN", | |
| "DCGAN only", | |
| "VAE-GAN", | |
| "Vanilla VAE" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "VAE-GAN combines reconstruction loss with adversarial loss to generate sharp images while maintaining latent structure." | |
| }, | |
| { | |
| "id": 77, | |
| "questionText": "While training a GAN, gradients vanish and the generator fails to improve. Which technique helps?", | |
| "options": [ | |
| "Use only MSE loss", | |
| "Increase KL divergence", | |
| "Remove the discriminator", | |
| "Use Wasserstein loss with gradient penalty" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Wasserstein GAN with gradient penalty stabilizes training and prevents vanishing gradients." | |
| }, | |
| { | |
| "id": 78, | |
| "questionText": "You are generating multimodal data (images + text). Which generative approach is suitable?", | |
| "options": [ | |
| "Normalizing Flows for text only", | |
| "Multimodal VAE or GAN", | |
| "PixelRNN only", | |
| "Standard Autoencoder" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Multimodal generative models can handle multiple types of inputs and generate data conditioned on both modalities." | |
| }, | |
| { | |
| "id": 79, | |
| "questionText": "Your GAN produces high-quality images but only from a limited subset of the data distribution. What is this issue called?", | |
| "options": [ | |
| "Underfitting", | |
| "Mode collapse", | |
| "Overfitting", | |
| "Latent space regularization" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Mode collapse occurs when the generator ignores parts of the data distribution and produces limited variety." | |
| }, | |
| { | |
| "id": 80, | |
| "questionText": "Which evaluation metric can detect mode collapse in image generation?", | |
| "options": [ | |
| "Fréchet Inception Distance (FID)", | |
| "Accuracy", | |
| "MSE", | |
| "Cross-entropy" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "FID measures distributional similarity; poor FID often indicates lack of diversity or mode collapse." | |
| }, | |
| { | |
| "id": 81, | |
| "questionText": "Which approach encourages disentangled latent representations in VAEs?", | |
| "options": [ | |
| "PixelCNN", | |
| "Standard GAN", | |
| "β-VAE", | |
| "Autoregressive model" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "β-VAE adds weight to the KL divergence term, encouraging independent latent factors." | |
| }, | |
| { | |
| "id": 82, | |
| "questionText": "You are training a text-to-image model. Which generative architecture is suitable?", | |
| "options": [ | |
| "Autoregressive flow model", | |
| "PixelRNN only", | |
| "Conditional GAN or diffusion-based model", | |
| "VAE only" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Conditional GANs and diffusion models can generate images conditioned on text input." | |
| }, | |
| { | |
| "id": 83, | |
| "questionText": "In a VAE, if latent space dimension is too small, what is likely to happen?", | |
| "options": [ | |
| "Mode collapse", | |
| "Poor reconstruction quality due to information bottleneck", | |
| "Gradient explosion in discriminator", | |
| "Overfitting on test set" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "A small latent dimension limits information storage, reducing reconstruction fidelity." | |
| }, | |
| { | |
| "id": 84, | |
| "questionText": "Which GAN variant allows semi-supervised learning?", | |
| "options": [ | |
| "PixelCNN", | |
| "Normalizing Flow", | |
| "Standard VAE", | |
| "AC-GAN (Auxiliary Classifier GAN)" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "AC-GAN uses an auxiliary classifier in the discriminator to incorporate labeled data for semi-supervised learning." | |
| }, | |
| { | |
| "id": 85, | |
| "questionText": "Which method helps prevent mode collapse by making discriminator aware of multiple samples?", | |
| "options": [ | |
| "KL divergence scaling", | |
| "Gradient clipping", | |
| "Latent space regularization", | |
| "Minibatch discrimination" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "Minibatch discrimination introduces dependencies among samples, encouraging generator diversity." | |
| }, | |
| { | |
| "id": 86, | |
| "questionText": "You are training a GAN on limited data, but it overfits. Which technique can help?", | |
| "options": [ | |
| "Increase latent space dimension", | |
| "Data augmentation and regularization", | |
| "Remove the discriminator", | |
| "Reduce batch size to 1" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Data augmentation expands the dataset, helping the generator and discriminator generalize better." | |
| }, | |
| { | |
| "id": 87, | |
| "questionText": "Which generative model is best for continuous sequence prediction (e.g., speech waveforms)?", | |
| "options": [ | |
| "PixelCNN", | |
| "DCGAN", | |
| "VAE only", | |
| "WaveNet (autoregressive model)" | |
| ], | |
| "correctAnswerIndex": 3, | |
| "explanation": "WaveNet uses autoregressive convolutions suitable for generating continuous sequences like audio." | |
| }, | |
| { | |
| "id": 88, | |
| "questionText": "Which model can generate new images while maintaining semantic content from a reference image?", | |
| "options": [ | |
| "PixelCNN", | |
| "Conditional GAN (e.g., Pix2Pix or CycleGAN)", | |
| "VAE without conditioning", | |
| "Vanilla Autoencoder" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Conditional GANs can generate images conditioned on a reference, preserving structure while changing style." | |
| }, | |
| { | |
| "id": 89, | |
| "questionText": "You need to evaluate generated text quality. Which metric is suitable?", | |
| "options": [ | |
| "BLEU or ROUGE score", | |
| "MSE", | |
| "KL divergence only", | |
| "FID" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "BLEU and ROUGE compare generated text against reference text for content quality and fluency." | |
| }, | |
| { | |
| "id": 90, | |
| "questionText": "Which approach can generate realistic images from random noise efficiently?", | |
| "options": [ | |
| "VAE without adversarial loss", | |
| "PixelCNN only", | |
| "GAN with convolutional generator", | |
| "Autoregressive RNN" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Convolutional GANs transform random latent vectors into high-quality images efficiently." | |
| }, | |
| { | |
| "id": 91, | |
| "questionText": "Which challenge is common in conditional generative models?", | |
| "options": [ | |
| "Reconstruction error is zero", | |
| "Generator ignoring conditioning labels (mode collapse)", | |
| "Gradient explosion in decoder", | |
| "Overfitting latent space only" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Conditional models may fail to produce diverse outputs for all conditions, leading to mode collapse." | |
| }, | |
| { | |
| "id": 92, | |
| "questionText": "Which technique allows GANs to handle high-resolution images more effectively?", | |
| "options": [ | |
| "VAE reconstruction only", | |
| "Progressive growing of generator and discriminator", | |
| "Reducing latent space dimension to 1", | |
| "Removing convolutional layers" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Progressively increasing image resolution during training stabilizes GANs and enables high-resolution synthesis." | |
| }, | |
| { | |
| "id": 93, | |
| "questionText": "You want to interpolate between two generated faces smoothly. Which model property is critical?", | |
| "options": [ | |
| "Regularized latent space (e.g., in VAE)", | |
| "Autoregressive pixel modeling", | |
| "Mode collapse prevention only", | |
| "Large discriminator" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "A smooth latent space ensures that interpolating between points generates meaningful intermediate outputs." | |
| }, | |
| { | |
| "id": 94, | |
| "questionText": "Which generative model is suitable for generating tabular data with mixed categorical and continuous features?", | |
| "options": [ | |
| "WaveNet", | |
| "CTGAN or GMM-based models", | |
| "PixelCNN", | |
| "VAE for images only" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "CTGANs can handle tabular data and model mixed feature types effectively." | |
| }, | |
| { | |
| "id": 95, | |
| "questionText": "Which technique improves GAN training stability and reduces oscillations?", | |
| "options": [ | |
| "Using fully connected layers only", | |
| "Reducing latent dimension to 1", | |
| "Using Wasserstein loss with gradient penalty", | |
| "Only reconstruction loss" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "WGAN-GP provides smoother gradients, improving convergence and stability in training GANs." | |
| }, | |
| { | |
| "id": 96, | |
| "questionText": "Which generative model allows controlled attribute manipulation (e.g., changing hair color in images)?", | |
| "options": [ | |
| "Normalizing Flows only", | |
| "PixelCNN", | |
| "Conditional GANs or StyleGAN", | |
| "Vanilla Autoencoder" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Conditional GANs and StyleGAN allow latent space manipulations to change attributes while keeping other content fixed." | |
| }, | |
| { | |
| "id": 97, | |
| "questionText": "You observe that GAN training oscillates and fails to converge. Which step is recommended?", | |
| "options": [ | |
| "Use gradient penalty, spectral normalization, or learning rate tuning", | |
| "Remove the generator entirely", | |
| "Use only MSE loss", | |
| "Increase latent dimension to 10,000" | |
| ], | |
| "correctAnswerIndex": 0, | |
| "explanation": "Techniques like gradient penalty and spectral normalization stabilize GAN training and reduce oscillations." | |
| }, | |
| { | |
| "id": 98, | |
| "questionText": "Which generative model is most appropriate for music generation?", | |
| "options": [ | |
| "DCGAN only", | |
| "Standard VAE without temporal modeling", | |
| "RNN-based or Transformer-based models", | |
| "PixelCNN" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "Sequential models like RNNs or Transformers can capture temporal dependencies for music synthesis." | |
| }, | |
| { | |
| "id": 99, | |
| "questionText": "You need a generative model that can produce multiple diverse outputs for a single input. Which approach is suitable?", | |
| "options": [ | |
| "PixelCNN", | |
| "Conditional VAE or multimodal GAN", | |
| "Vanilla GAN only", | |
| "Standard Autoencoder" | |
| ], | |
| "correctAnswerIndex": 1, | |
| "explanation": "Conditional VAEs or multimodal GANs allow sampling diverse outputs for the same input condition." | |
| }, | |
| { | |
| "id": 100, | |
| "questionText": "Which approach allows a VAE to generate sharper images without sacrificing latent space structure?", | |
| "options": [ | |
| "Use only MSE loss", | |
| "Remove KL divergence", | |
| "Combine with adversarial loss (VAE-GAN)", | |
| "Reduce latent dimension to 1" | |
| ], | |
| "correctAnswerIndex": 2, | |
| "explanation": "VAE-GAN leverages adversarial loss to improve image sharpness while retaining smooth latent representations." | |
| } | |
| ] | |
| } | |