GenCN UI

Human Verification

GencnUI-powered human verification component that uses AI to generate verification challenges and verify user responses — text answers, spoken letter sequences, or selfies — through Chrome's LanguageModel API.

Try out the component below to see how it generates verification challenges and verifies your responses (text, audio, or image) through Chrome's LanguageModel API.

Loading preview...
"use client";

import { useState } from "react";
import { GencnUIHumanVerification } from "@/registry/new-york/gencn-ui/items/human-verification/gencn-ui-human-verification";
import {
  Card,
  CardContent,
  CardDescription,
  CardHeader,
  CardTitle,
} from "@/components/ui/card";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { CheckCircle2, FileText, Mic, Image as ImageIcon } from "lucide-react";

export function GencnUIHumanVerificationExample() {
  // Verified flag and confidence score for each of the three verification modes.
  const [textVerified, setTextVerified] = useState(false);
  const [audioVerified, setAudioVerified] = useState(false);
  const [imageVerified, setImageVerified] = useState(false);
  const [textConfidence, setTextConfidence] = useState<number | null>(null);
  const [audioConfidence, setAudioConfidence] = useState<number | null>(null);
  const [imageConfidence, setImageConfidence] = useState<number | null>(null);

  // Renders the success alert for a mode once it is verified with a confidence.
  const successAlert = (verified: boolean, confidence: number | null, label: string) =>
    verified && confidence !== null ? (
      <Alert>
        <CheckCircle2 className="h-4 w-4" />
        <AlertDescription>
          ✓ {label} verification successful! Confidence: {(confidence * 100).toFixed(1)}%
        </AlertDescription>
      </Alert>
    ) : null;

  return (
    <Card className="w-full border-none shadow-none">
      <CardHeader>
        <CardTitle>Human Verification</CardTitle>
        <CardDescription>
          Verify you are human using AI-powered verification. Choose from three verification types: Text Q&A, Audio, or Image.
        </CardDescription>
      </CardHeader>
      <CardContent className="space-y-4">
        <Tabs defaultValue="text" className="w-full">
          <TabsList className="grid w-full grid-cols-3">
            <TabsTrigger value="text">
              <FileText className="h-4 w-4 mr-2" />
              Text Q&A
            </TabsTrigger>
            <TabsTrigger value="audio">
              <Mic className="h-4 w-4 mr-2" />
              Audio
            </TabsTrigger>
            <TabsTrigger value="image">
              <ImageIcon className="h-4 w-4 mr-2" />
              Image
            </TabsTrigger>
          </TabsList>

          <TabsContent value="text" className="space-y-4">
            <div className="space-y-2">
              <h3 className="text-sm font-medium">Text Q&A Verification</h3>
              <p className="text-sm text-muted-foreground">
                Answer a general knowledge question to verify you are human. The question will be generated based on the topic you specify.
              </p>
            </div>
            <GencnUIHumanVerification
              type="text"
              context={{
                topic: "city names",
                domain: "Geography",
              }}
              buttonText="Start Text Verification"
              onVerified={(confidence) => {
                setTextVerified(true);
                setTextConfidence(confidence);
              }}
              onVerificationFailed={() => {
                setTextVerified(false);
                setTextConfidence(null);
              }}
              onError={(error) => {
                console.error("Text verification error:", error);
              }}
            />
            {successAlert(textVerified, textConfidence, "Text")}
          </TabsContent>

          <TabsContent value="audio" className="space-y-4">
            <div className="space-y-2">
              <h3 className="text-sm font-medium">Audio Verification</h3>
              <p className="text-sm text-muted-foreground">
                Speak a randomly generated alphabet sequence to verify you are human. You can record using your microphone or upload an audio file.
              </p>
            </div>
            <GencnUIHumanVerification
              type="audio"
              buttonText="Start Audio Verification"
              onVerified={(confidence) => {
                setAudioVerified(true);
                setAudioConfidence(confidence);
              }}
              onVerificationFailed={() => {
                setAudioVerified(false);
                setAudioConfidence(null);
              }}
              onError={(error) => {
                console.error("Audio verification error:", error);
              }}
            />
            {successAlert(audioVerified, audioConfidence, "Audio")}
          </TabsContent>

          <TabsContent value="image" className="space-y-4">
            <div className="space-y-2">
              <h3 className="text-sm font-medium">Image Verification</h3>
              <p className="text-sm text-muted-foreground">
                Follow a generated instruction and take a selfie to verify you are human. The AI will analyze your photo to confirm you followed the instruction.
              </p>
            </div>
            <GencnUIHumanVerification
              type="image"
              buttonText="Start Image Verification"
              onVerified={(confidence) => {
                setImageVerified(true);
                setImageConfidence(confidence);
              }}
              onVerificationFailed={() => {
                setImageVerified(false);
                setImageConfidence(null);
              }}
              onError={(error) => {
                console.error("Image verification error:", error);
              }}
            />
            {successAlert(imageVerified, imageConfidence, "Image")}
          </TabsContent>
        </Tabs>
      </CardContent>
    </Card>
  );
}

Server API

Path:
/api/human-verification/generate
Source:
import { google, createGoogleGenerativeAI } from "@ai-sdk/google";
import { generateText } from "ai";

// Allow responses up to 30 seconds
export const maxDuration = 30;

/** Request body for POST /api/human-verification/generate. */
interface GenerateVerificationRequest {
  // Which kind of challenge to generate.
  type: "text" | "audio" | "image";
  // Optional topic/domain hints used when generating a text question.
  context?: {
    topic?: string;
    domain?: string;
  };
  // Optional base prompt used when generating an image (selfie) instruction.
  instructionPrompt?: string;
  // Optional API key override; falls back to the default Google provider.
  LLM_API_KEY?: string;
  // Optional model id; "auto" (or absent) resolves to the default model.
  LLM_MODEL?: string;
}

/**
 * Builds the LLM prompt and sampling temperature for a verification type.
 * - "text": a very simple general-knowledge question, optionally scoped to
 *   the caller-supplied topic/domain.
 * - "audio": a short hyphen-separated letter sequence for the user to speak.
 * - "image": a one-sentence selfie gesture instruction; uses a higher
 *   temperature so repeated generations vary.
 */
function buildChallengePrompt(
  type: "text" | "audio" | "image",
  context?: { topic?: string; domain?: string },
  instructionPrompt?: string
): { prompt: string; temperature: number } {
  if (type === "text") {
    const topicContext = context?.topic
      ? `Topic: ${context.topic}${context.domain ? ` (Domain: ${context.domain})` : ""}`
      : "";
    return {
      temperature: 0.7,
      prompt: `Generate a very simple, easy general knowledge question${topicContext ? ` about ${topicContext}` : ""}. The question should be so simple that even a child can answer it. Examples: "What color is the sky?", "How many legs does a cat have?", "What do we use to write?". Keep it extremely simple and basic. Respond with ONLY the question text, no labels or prefixes. Maximum 15 words.`,
    };
  }

  if (type === "audio") {
    return {
      temperature: 0.7,
      prompt: `Generate a random sequence of 4-6 letters (e.g., "A-B-C-D" or "X-Y-Z"). The sequence should be easy to pronounce. Respond with ONLY the letter sequence separated by hyphens, no labels or prefixes. Example format: "A-B-C-D"`,
    };
  }

  // Image instruction: higher temperature for more variety between generations.
  return {
    temperature: 1.0,
    prompt: instructionPrompt
      ? `${instructionPrompt}\n\nGenerate a unique, creative variation of this instruction. Each time you generate, create a DIFFERENT gesture or pose instruction. Vary the gestures, poses, and wording. IMPORTANT: Respond with ONLY ONE instruction sentence. Do not include labels, prefixes, or multiple instructions. Just the instruction text itself. Maximum 15 words.`
      : "Generate a selfie verification instruction. Respond with ONLY the instruction text, no labels.",
  };
}

/**
 * Strips labels, markdown formatting, and surrounding quotes that the model
 * may add around the generated challenge text. For image instructions, also
 * trims the result to a single sentence of at most 15 words ending with
 * punctuation.
 */
function cleanGeneratedText(raw: string, type: "text" | "audio" | "image"): string {
  let cleaned = raw.trim();

  // Remove labels/prefixes the model sometimes prepends.
  cleaned = cleaned.replace(/^(Question:?\s*)/i, "");
  cleaned = cleaned.replace(/^(Sequence:?\s*)/i, "");
  cleaned = cleaned.replace(/^(Selfie\s*(verification)?:?\s*)/i, "");
  cleaned = cleaned.replace(/^(Instruction:?\s*)/i, "");
  cleaned = cleaned.replace(/^(Verification:?\s*)/i, "");

  // Remove markdown formatting (bold markers, headings, list markers, quotes).
  cleaned = cleaned.replace(/\*\*/g, "");
  cleaned = cleaned.replace(/^#+\s*/gm, "");
  cleaned = cleaned.replace(/^\d+\.\s*/gm, "");
  cleaned = cleaned.replace(/^[-*]\s*/gm, "");
  cleaned = cleaned.replace(/^["']|["']$/g, "");

  // For image instructions, keep only the first sentence, capped at 15 words.
  if (type === "image") {
    const firstSentenceMatch = cleaned.match(/^([^.!?\n]+[.!?]?)/);
    if (firstSentenceMatch) {
      cleaned = firstSentenceMatch[1].trim();
    } else {
      cleaned = cleaned.split(/\s+/).slice(0, 15).join(" ");
    }
    cleaned = cleaned.replace(/^[^:]+:\s*/, "");
    cleaned = cleaned.split(/\s+/).slice(0, 15).join(" ");
    if (!cleaned.match(/[.!?]$/)) {
      cleaned = cleaned + ".";
    }
  }

  return cleaned;
}

/**
 * POST /api/human-verification/generate
 *
 * Generates a verification challenge for the requested type and returns it
 * as JSON under a type-specific key (`question`, `alphabetSequence`, or
 * `instruction`) plus a unified `challenge` key. Responds 400 when `type`
 * is missing and 500 on generation errors.
 */
export async function POST(req: Request) {
  try {
    const request: GenerateVerificationRequest = await req.json();
    const { type, context, instructionPrompt, LLM_API_KEY, LLM_MODEL } = request;

    if (!type) {
      return new Response(
        JSON.stringify({ error: "type is required" }),
        {
          status: 400,
          headers: { "Content-Type": "application/json" },
        }
      );
    }

    // Use a caller-supplied API key when provided, otherwise the default provider.
    const googleProvider = LLM_API_KEY
      ? createGoogleGenerativeAI({ apiKey: LLM_API_KEY })
      : google;

    // "auto" (or absent) resolves to the default model name.
    const model = LLM_MODEL === "auto" || !LLM_MODEL ? "gemini-2.5-flash" : LLM_MODEL;

    const { prompt, temperature } = buildChallengePrompt(type, context, instructionPrompt);

    const result = await generateText({
      model: googleProvider(model),
      prompt,
      temperature,
      topP: type === "image" ? 0.95 : 0.9,
    });

    const cleaned = cleanGeneratedText(result.text, type);

    // Return the challenge under a type-specific field name.
    const response: Record<string, string> = {};
    if (type === "text") {
      response.question = cleaned;
    } else if (type === "audio") {
      response.alphabetSequence = cleaned;
    } else {
      response.instruction = cleaned;
    }
    response.challenge = cleaned; // Also include as challenge for unified access

    return new Response(JSON.stringify(response), {
      headers: { "Content-Type": "application/json" },
    });
  } catch (error) {
    console.error("Generate verification API error:", error);
    return new Response(
      JSON.stringify({
        error: "Internal server error",
        message: (error as Error).message,
      }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      }
    );
  }
}

Path:
/api/human-verification/verify-text
Source:
import { google, createGoogleGenerativeAI } from "@ai-sdk/google";
import { generateObject } from "ai";
import { z } from "zod";

// Allow responses up to 30 seconds
export const maxDuration = 30;

/** Request body for POST /api/human-verification/verify-text. */
interface VerifyTextRequest {
  // The question that was presented to the user.
  question: string;
  // The user's answer to verify.
  answer: string;
  // Optional API key override; falls back to the default Google provider.
  LLM_API_KEY?: string;
  // Optional model id; "auto" (or absent) resolves to the default model.
  LLM_MODEL?: string;
}

/**
 * POST /api/human-verification/verify-text
 *
 * Uses the LLM as a judge to decide whether the user's answer to the
 * generated question is correct. Returns { verified, confidence, reason? },
 * 400 when a required field is missing, or 500 on errors.
 */
export async function POST(req: Request) {
  try {
    const request: VerifyTextRequest = await req.json();
    const { question, answer, LLM_API_KEY, LLM_MODEL } = request;

    if (!question) {
      return new Response(JSON.stringify({ error: "question is required" }), {
        status: 400,
        headers: { "Content-Type": "application/json" },
      });
    }

    if (!answer) {
      return new Response(JSON.stringify({ error: "answer is required" }), {
        status: 400,
        headers: { "Content-Type": "application/json" },
      });
    }

    // Use a caller-supplied API key when provided, otherwise the default provider.
    const googleProvider = LLM_API_KEY
      ? createGoogleGenerativeAI({ apiKey: LLM_API_KEY })
      : google;

    // "auto" (or absent) resolves to the default model name.
    const model =
      LLM_MODEL === "auto" || !LLM_MODEL ? "gemini-2.5-flash" : LLM_MODEL;

    // Demo restriction: only the default model is permitted. The previous
    // check used `===`, which rejected the default model and let every other
    // model through — the opposite of what the error message describes.
    if (model !== "gemini-2.5-flash") {
      throw new Error("Other models disabled for demo purposes");
    }

    const promptText = `Question: "${question}"\nAnswer: "${answer}"\n\nIs this answer correct? Analyze the answer carefully and respond with a JSON object containing:
- "verified" (true if the answer is correct, false otherwise)
- "confidence" (a number between 0 and 1)
- "reason" (if verified is false, provide a brief reason why)`;

    // Structured-output schema enforced by generateObject.
    const schema = z.object({
      verified: z.boolean(),
      confidence: z.number().min(0).max(1),
      reason: z.string().optional(),
    });

    const result = await generateObject({
      model: googleProvider(model),
      schema,
      prompt: promptText,
    });

    return new Response(
      JSON.stringify({
        verified: result.object.verified,
        confidence: result.object.confidence,
        reason: result.object.reason,
      }),
      {
        headers: { "Content-Type": "application/json" },
      }
    );
  } catch (error) {
    console.error("Verify text API error:", error);
    return new Response(
      JSON.stringify({
        error: "Internal server error",
        message: (error as Error).message,
      }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      }
    );
  }
}
Path:
/api/human-verification/verify-audio
Source:
import { google, createGoogleGenerativeAI } from "@ai-sdk/google";
import { generateObject } from "ai";
import { z } from "zod";

// Allow responses up to 30 seconds
export const maxDuration = 30;

/** Request body for POST /api/human-verification/verify-audio. */
interface VerifyAudioRequest {
  // The letter sequence the user was asked to speak (e.g. "A-B-C-D").
  alphabetSequence: string;
  audio: string; // base64 encoded audio, optionally a data URL ("data:audio/webm;base64,...")
  // Optional API key override; falls back to the default Google provider.
  LLM_API_KEY?: string;
  // Optional model id; "auto" (or absent) resolves to the default model.
  LLM_MODEL?: string;
}

/**
 * Decodes a base64 string into a Buffer, tolerating an optional data-URL
 * prefix (e.g. "data:audio/webm;base64,").
 */
function base64ToBuffer(base64: string): Buffer {
  const segments = base64.split(",");
  // A data URL has the payload after the first comma; plain base64 has none.
  const payload = segments.length > 1 ? segments[1] : base64;
  return Buffer.from(payload, "base64");
}

/**
 * POST /api/human-verification/verify-audio
 *
 * Uses the LLM to check whether the uploaded recording contains the user
 * speaking the expected alphabet sequence. Returns
 * { verified, confidence, reason? }, 400 when a required field is missing,
 * or 500 on errors.
 */
export async function POST(req: Request) {
  try {
    const request: VerifyAudioRequest = await req.json();
    const { alphabetSequence, audio, LLM_API_KEY, LLM_MODEL } = request;

    if (!alphabetSequence) {
      return new Response(
        JSON.stringify({ error: "alphabetSequence is required" }),
        {
          status: 400,
          headers: { "Content-Type": "application/json" },
        }
      );
    }

    if (!audio) {
      return new Response(
        JSON.stringify({ error: "audio is required" }),
        {
          status: 400,
          headers: { "Content-Type": "application/json" },
        }
      );
    }

    // Use a caller-supplied API key when provided, otherwise the default provider.
    const googleProvider = LLM_API_KEY
      ? createGoogleGenerativeAI({ apiKey: LLM_API_KEY })
      : google;

    // "auto" (or absent) resolves to the default model name.
    const model = LLM_MODEL === "auto" || !LLM_MODEL ? "gemini-2.5-flash" : LLM_MODEL;

    // Demo restriction: only the default model is permitted. The previous
    // check used `===`, which rejected the default model and let every other
    // model through — the opposite of what the error message describes.
    if (model !== "gemini-2.5-flash") {
      throw new Error("Other models disabled for demo purposes");
    }

    const promptText = `The user was asked to speak this alphabet sequence: "${alphabetSequence}". Does this audio recording contain the user speaking these letters in the correct order? Analyze the audio carefully and respond with a JSON object containing:
- "verified" (true if the audio contains the correct sequence, false otherwise)
- "confidence" (a number between 0 and 1)
- "reason" (if verified is false, provide a brief reason why)`;

    // Structured-output schema enforced by generateObject.
    const schema = z.object({
      verified: z.boolean(),
      confidence: z.number().min(0).max(1),
      reason: z.string().optional(),
    });

    // Decode the base64 payload into raw bytes for the model.
    const audioBuffer = base64ToBuffer(audio);

    const result = await generateObject({
      model: googleProvider(model),
      schema,
      prompt: [
        {
          role: "user",
          content: [
            { type: "text", text: promptText },
            {
              type: "file",
              data: audioBuffer,
              mediaType: "audio/webm",
            },
          ],
        },
      ],
    });

    return new Response(
      JSON.stringify({
        verified: result.object.verified,
        confidence: result.object.confidence,
        reason: result.object.reason,
      }),
      {
        headers: { "Content-Type": "application/json" },
      }
    );
  } catch (error) {
    console.error("Verify audio API error:", error);
    return new Response(
      JSON.stringify({
        error: "Internal server error",
        message: (error as Error).message,
      }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      }
    );
  }
}

Path:
/api/human-verification/verify-image
Source:
import { google, createGoogleGenerativeAI } from "@ai-sdk/google";
import { generateObject } from "ai";
import { z } from "zod";

// Allow responses up to 30 seconds
export const maxDuration = 30;

/** Request body for POST /api/human-verification/verify-image. */
interface VerifyImageRequest {
  // The gesture/pose instruction the user was asked to follow.
  instruction: string;
  image: string; // base64 encoded image, optionally a data URL ("data:image/jpeg;base64,...")
  // Optional API key override; falls back to the default Google provider.
  LLM_API_KEY?: string;
  // Optional model id; "auto" (or absent) resolves to the default model.
  LLM_MODEL?: string;
}

/**
 * Decodes a base64 string into a Buffer, tolerating an optional data-URL
 * prefix (e.g. "data:image/jpeg;base64,").
 */
function base64ToBuffer(base64: string): Buffer {
  const segments = base64.split(",");
  // A data URL has the payload after the first comma; plain base64 has none.
  const payload = segments.length > 1 ? segments[1] : base64;
  return Buffer.from(payload, "base64");
}

/**
 * POST /api/human-verification/verify-image
 *
 * Uses the LLM to check whether the uploaded photo shows the user following
 * the generated instruction. Returns { verified, confidence, reason? },
 * 400 when a required field is missing, or 500 on errors.
 */
export async function POST(req: Request) {
  try {
    const request: VerifyImageRequest = await req.json();
    const { instruction, image, LLM_API_KEY, LLM_MODEL } = request;

    if (!instruction) {
      return new Response(
        JSON.stringify({ error: "instruction is required" }),
        {
          status: 400,
          headers: { "Content-Type": "application/json" },
        }
      );
    }

    if (!image) {
      return new Response(
        JSON.stringify({ error: "image is required" }),
        {
          status: 400,
          headers: { "Content-Type": "application/json" },
        }
      );
    }

    // Use a caller-supplied API key when provided, otherwise the default provider.
    const googleProvider = LLM_API_KEY
      ? createGoogleGenerativeAI({ apiKey: LLM_API_KEY })
      : google;

    // "auto" (or absent) resolves to the default model name.
    const model = LLM_MODEL === "auto" || !LLM_MODEL ? "gemini-2.5-flash" : LLM_MODEL;

    // Demo restriction: only the default model is permitted. The previous
    // check used `===`, which rejected the default model and let every other
    // model through — the opposite of what the error message describes.
    if (model !== "gemini-2.5-flash") {
      throw new Error("Other models disabled for demo purposes");
    }

    const promptText = `Does this photo show a person following this instruction: "${instruction}"? Analyze the image carefully and respond with a JSON object containing:
- "verified" (true if the person is following the instruction correctly, false otherwise)
- "confidence" (a number between 0 and 1)
- "reason" (if verified is false, provide a brief reason why)`;

    // Structured-output schema enforced by generateObject.
    const schema = z.object({
      verified: z.boolean(),
      confidence: z.number().min(0).max(1),
      reason: z.string().optional(),
    });

    // Decode the base64 payload into raw bytes for the model.
    const imageBuffer = base64ToBuffer(image);

    const result = await generateObject({
      model: googleProvider(model),
      schema,
      prompt: [
        {
          role: "user",
          content: [
            { type: "text", text: promptText },
            {
              type: "image",
              image: imageBuffer,
            },
          ],
        },
      ],
    });

    return new Response(
      JSON.stringify({
        verified: result.object.verified,
        confidence: result.object.confidence,
        reason: result.object.reason,
      }),
      {
        headers: { "Content-Type": "application/json" },
      }
    );
  } catch (error) {
    console.error("Verify image API error:", error);
    return new Response(
      JSON.stringify({
        error: "Internal server error",
        message: (error as Error).message,
      }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      }
    );
  }
}

Installation

Setup Required: Make sure you have configured components.json first. See the installation guide for setup instructions.

npx shadcn@latest add @gencn-ui/gencn-ui-human-verification
"use client";

import React, { useState, useRef, useCallback, useEffect, type FC } from "react";
import {
  Dialog,
  DialogContent,
  DialogHeader,
  DialogTitle,
} from "@/components/ui/dialog";
import { Alert, AlertTitle, AlertDescription } from "@/components/ui/alert";
import { Button } from "@/components/ui/button";
import { CheckCircle2, XCircle, Loader2, Camera, Info } from "lucide-react";
import { useHumanVerification, HumanVerificationError } from "@/registry/new-york/gencn-ui/items/shared/hooks/internal/use-gencn-ui-human-verification";
import type { ServerFallbackConfig } from "@/registry/new-york/gencn-ui/items/shared/gencn-ui-types";
import type { HumanVerificationContext } from "@/registry/new-york/gencn-ui/items/shared/lib/gencn-ui-human-verification-lib";
import { VideoCapture, type VideoCaptureRef } from "@/registry/new-york/gencn-ui/items/human-verification/components/gencn-ui-video-capture";
import { AudioCapture } from "@/registry/new-york/gencn-ui/items/human-verification/components/gencn-ui-audio-capture";
import { TextInput } from "@/registry/new-york/gencn-ui/items/human-verification/components/gencn-ui-text-input";

/** Supported verification modes. */
export type VerificationType = "text" | "audio" | "image";

/** Props for the GencnUIHumanVerification component. */
export interface GencnUIHumanVerificationProps {
  /**
   * Verification type: "text" for Q&A, "audio" for alphabet sequence, "image" for selfie
   * @default "image"
   */
  type?: VerificationType;

  /**
   * Context for text verification (topic/domain for question generation)
   */
  context?: HumanVerificationContext;

  /**
   * Custom instruction text for image verification (if not provided, will be generated)
   * @default undefined (will be generated)
   */
  instruction?: string;

  /**
   * Prompt to generate instruction for image verification
   * @default "Write a very short, concise instruction for human verification through selfie, ask them to do some gesture in selfie, gesture should be static. Keep it brief, maximum 20 words."
   */
  instructionPrompt?: string;

  /**
   * Callback when verification succeeds
   */
  onVerified?: (confidence: number) => void;

  /**
   * Callback when verification fails
   */
  onVerificationFailed?: () => void;

  /**
   * Callback when an error occurs
   */
  onError?: (error: Error) => void;

  /**
   * Additional className for the container
   */
  className?: string;

  /**
   * Custom button text
   * @default "Verify you are human"
   */
  buttonText?: string;

  /**
   * Server fallback configuration
   */
  serverFallback?: ServerFallbackConfig;
}

/** Internal state machine for the verification dialog/flow. */
type VerificationState =
  | "idle" // Initial state, button visible
  | "generating" // Generating challenge
  | "needs-download" // LLM needs to be downloaded
  | "prompting" // Dialog open, ready for user input
  | "starting-camera" // "Start Camera" clicked, waiting for stream (image only)
  | "countdown" // Camera open, 5s timer running (image only)
  | "verifying" // Verification in progress
  | "failed-attempt" // Verification failed, "Retry" button visible
  | "success" // Final success state, dialog closed
  | "failed-final"; // Final failed state (max attempts), dialog closed

// Maximum number of failed verification attempts before the final failure state.
const MAX_ATTEMPTS = 3;

export const GencnUIHumanVerification: FC<GencnUIHumanVerificationProps> = ({
  type = "image",
  context,
  instruction: propInstruction,
  instructionPrompt = "Write a very short, concise instruction for human verification through selfie, ask them to do some gesture in selfie, gesture should be static. Keep it brief, maximum 20 words.",
  onVerified,
  onVerificationFailed,
  onError,
  className,
  buttonText = "Verify you are human",
  serverFallback,
}) => {
  const [state, setState] = useState<VerificationState>("idle");
  const [isDialogOpen, setIsDialogOpen] = useState(false);
  const [statusMessage, setStatusMessage] = useState<string | null>(null);
  const [lastFailureReason, setLastFailureReason] = useState<string | null>(null);
  const [errorDialogMessage, setErrorDialogMessage] = useState<string | null>(null);

  const [generatedChallenge, setGeneratedChallenge] = useState<string | null>(null);
  const [isGeneratingChallenge, setIsGeneratingChallenge] = useState(false);
  const [previousQuestions, setPreviousQuestions] = useState<string[]>([]);

  // Use hook for challenge generation and availability checks
  const humanVerification = useHumanVerification({ serverFallback });
  

  // Get challenge based on type
  const challenge = type === "image" 
    ? (propInstruction || generatedChallenge || "Take your selfie following the instructions below")
    : (generatedChallenge || "");

  // Keep challenge ref up to date
  const challengeRef = useRef<string>("");
  useEffect(() => {
    challengeRef.current = challenge;
  }, [challenge]);

  // VideoCapture ref (for image type)
  const videoCaptureRef = useRef<VideoCaptureRef>(null);

  // Cleanup function
  const fullCleanup = useCallback(() => {
    videoCaptureRef.current?.stop();
  }, []);

  // Cleanup on unmount
  useEffect(() => {
    return () => {
      fullCleanup();
    };
  }, [fullCleanup]);

  // Ensure camera is closed when dialog is closed
  useEffect(() => {
    if (!isDialogOpen) {
      fullCleanup();
    }
  }, [isDialogOpen, fullCleanup]);

  // Check for downloadable LLM when dialog opens


  // Handle dialog open/close
  const handleOpenChange = useCallback(
    (open: boolean) => {
      if (!open) {
        fullCleanup();
        setIsDialogOpen(false);
        setErrorDialogMessage(null);
        if (state !== "success" && state !== "failed-final") {
          setState("idle");
        }
      }
    },
    [fullCleanup, state]
  );

  const closeDialogWithDelay = useCallback(() => {
    setTimeout(() => {
      setIsDialogOpen(false);
    }, 100);
  }, []);

  const startCamera = useCallback(async () => {
    try {
      if (videoCaptureRef.current?.isStreamReady) {
        setState("countdown");
        videoCaptureRef.current?.startCountdown();
        return;
      }

      setStatusMessage("Starting camera...");
      setState("starting-camera");
      await videoCaptureRef.current?.startCamera();
      setState("countdown");
      videoCaptureRef.current?.startCountdown();
    } catch (err) {
      const error = err as Error;
      setStatusMessage(`Failed to access camera: ${error.message}`);
      setState("prompting");
      onError?.(error);
    }
  }, [onError]);

  // Handle verification success
  const handleVerified = useCallback((confidence: number) => {
    setState("success");
    onVerified?.(confidence);
    closeDialogWithDelay();
  }, [onVerified, closeDialogWithDelay]);

  // Handle verification failure (max attempts reached)
  const handleVerificationFailed = useCallback(() => {
    setState("failed-final");
    onVerificationFailed?.();
    closeDialogWithDelay();
  }, [onVerificationFailed, closeDialogWithDelay]);

  // Handle verification errors (from components)
  const handleVerificationError = useCallback((error: Error) => {
    const errorMessage = error.message || "";
    
    if (error instanceof HumanVerificationError || errorMessage.includes("LanguageModel is not available and server fallback is disabled")) {
      setErrorDialogMessage(errorMessage);
      setState("idle");
    } else {
      const reason = errorMessage || "Verification failed. Please try again.";
      setLastFailureReason(reason);
      setStatusMessage(reason);
      setState("failed-attempt");
    }
  }, []);

  // Handle image verification
  const handleImageVerified = useCallback((confidence: number) => {
    handleVerified(confidence);
  }, [handleVerified]);

  const handleImageVerificationFailed = useCallback(() => {
    handleVerificationFailed();
  }, [handleVerificationFailed]);

  // Handle audio verification
  const handleAudioVerified = useCallback((confidence: number) => {
    handleVerified(confidence);
  }, [handleVerified]);

  const handleAudioVerificationFailed = useCallback(() => {
    handleVerificationFailed();
  }, [handleVerificationFailed]);

  // Handle text verification
  const handleTextVerified = useCallback((confidence: number) => {
    // Clear error message on successful verification
    setLastFailureReason(null);
    setStatusMessage(null);
    handleVerified(confidence);
  }, [handleVerified]);

  const handleTextVerificationFailed = useCallback(() => {
    handleVerificationFailed();
  }, [handleVerificationFailed]);

  // Handle text verification errors - regenerate question on failure
  const handleTextVerificationError = useCallback(async (error: Error) => {
    const errorMessage = error.message || "";
    
    if (error instanceof HumanVerificationError || errorMessage.includes("LanguageModel is not available and server fallback is disabled")) {
      setErrorDialogMessage(errorMessage);
      setState("idle");
    } else {
      const reason = errorMessage || "Verification failed. Please try again.";
      setLastFailureReason(reason);
      // Don't set statusMessage to error - keep it separate for status updates
      setState("failed-attempt");
      
      // Regenerate question for text type when verification fails
      if (type === "text") {
        try {
          setIsGeneratingChallenge(true);
          setStatusMessage("Generating new question...");
          setState("generating");
          
          // Generate new question with variation to ensure it's different
          // Add timestamp to topic to encourage variation
          const variationContext = {
            ...context,
            topic: context?.topic 
              ? `${context.topic} (new question ${Date.now()})`
              : `general knowledge (new question ${Date.now()})`,
          };
          
          let generated = "";
          let attempts = 0;
          const maxAttempts = 5;
          
          // Try to generate a different question
          while (attempts < maxAttempts) {
            // Add variation to topic each attempt
            const attemptContext = {
              ...variationContext,
              topic: context?.topic 
                ? `${context.topic} (attempt ${attempts + 1}, ${Date.now()})`
                : `general knowledge (attempt ${attempts + 1}, ${Date.now()})`,
            };
            
            generated = await humanVerification.generateVerification({
              type: "text",
              context: attemptContext,
            });
            
            // Check if it's different from previous questions
            const normalizedGenerated = generated.toLowerCase().trim();
            const isDifferent = !previousQuestions.some(prev => {
              const normalizedPrev = prev.toLowerCase().trim();
              // Check for exact match or very similar (same first few words)
              return normalizedPrev === normalizedGenerated || 
                     normalizedPrev.split(' ').slice(0, 3).join(' ') === normalizedGenerated.split(' ').slice(0, 3).join(' ');
            });
            
            if (isDifferent || previousQuestions.length === 0) {
              break;
            }
            
            attempts++;
            // Add a small delay before retrying
            await new Promise(resolve => setTimeout(resolve, 200));
          }
          
          // Update previous questions list (keep last 3)
          setPreviousQuestions(prev => {
            const updated = [...prev, challenge].slice(-3);
            return updated;
          });
          
          setGeneratedChallenge(generated);
          // Keep the error message visible - don't clear it yet
          // setLastFailureReason(null); // Keep the error message
          setStatusMessage(null);
          setState("prompting");
        } catch (err) {
          const genError = err as Error;
          console.error("[AIHumanVerification] Failed to regenerate question:", genError);
          // Keep the current question if regeneration fails, but reset to prompting state
          setState("prompting");
          setStatusMessage(null);
        } finally {
          setIsGeneratingChallenge(false);
        }
      }
    }
  }, [type, context, humanVerification, challenge, previousQuestions]);

  // Generate challenge
  const generateChallenge = useCallback(async () => {
    // For image type, if propInstruction is provided, skip generation
    if (type === "image" && propInstruction) return;

    try {
      setIsGeneratingChallenge(true);
      const generated = await humanVerification.generateVerification({
        type,
        context,
        instructionPrompt: type === "image" ? instructionPrompt : undefined,
      });

      setGeneratedChallenge(generated);
      
      // Track the first question for text type
      if (type === "text") {
        setPreviousQuestions([generated]);
      }
    } catch (err) {
      const error = err as Error;
      const errorMessage = error.message || "";
      
      if (err instanceof HumanVerificationError || errorMessage.includes("LanguageModel is not available and server fallback is disabled")) {
        setErrorDialogMessage(errorMessage);
        setIsDialogOpen(true);
        setState("idle");
        return;
      } else {
        console.error("[AIHumanVerification] Failed to generate challenge:", error);
        // Set fallback challenge
        if (type === "text") {
          const fallback = "What is the capital of France?";
          setGeneratedChallenge(fallback);
          setPreviousQuestions([fallback]);
        } else if (type === "audio") {
          setGeneratedChallenge("A-B-C-D");
        } else {
          setGeneratedChallenge("take your selfie with your hand covering your mouth.");
        }
        onError?.(error);
      }
    } finally {
      setIsGeneratingChallenge(false);
    }
  }, [type, context, propInstruction, instructionPrompt, onError, humanVerification]);

  // Main button click handler
  const handleStart = useCallback(async () => {
    setState("generating");
    setLastFailureReason(null);
    setStatusMessage(null);
    // Reset previous questions for new verification session
    if (type === "text") {
      setPreviousQuestions([]);
    }

    // Check availability first before trying to generate challenge

    // Generate challenge if needed
    if (!(type === "image" && propInstruction)) {
      await generateChallenge();
    }

    setState("prompting");
    setIsDialogOpen(true);
  }, [type, propInstruction, generateChallenge, humanVerification,]);

  // Restart the current verification attempt after a failed one.
  const handleRetry = useCallback(() => {
    if (type !== "image") {
      setState("prompting");
      return;
    }
    // Image flow: kick off the capture countdown again.
    videoCaptureRef.current?.startCountdown();
    setState("countdown");
  }, [type]);


  // Render status bar
  const renderStatus = () => {
    switch (state) {
      case "generating":
        return (
          <>
            <Loader2 className="mr-2 h-4 w-4 animate-spin" /> {statusMessage || "Generating question..."}
          </>
        );
      case "starting-camera":
        return (
          <>
            <Loader2 className="mr-2 h-4 w-4 animate-spin" /> {statusMessage}
          </>
        );
      case "countdown":
        return (
          <>
            <Loader2 className="mr-2 h-4 w-4 animate-spin" /> Capturing...
          </>
        );
      case "failed-attempt":
        // Error message is shown in separate Alert, status just shows ready state
        return (
          <>
            <Info className="mr-2 h-4 w-4 text-blue-500" /> Ready to try again.
          </>
        );
      case "prompting":
        return (
          <>
            <Info className="mr-2 h-4 w-4 text-blue-500" /> Ready to start verification.
          </>
        );
      case "needs-download":
        return (
          <>
            <Info className="mr-2 h-4 w-4 text-amber-500" /> {statusMessage}
          </>
        );
      default:
        return null;
    }
  };

  // Render buttons
  const renderButtons = () => {
    if (errorDialogMessage) {
      return (
          <Button onClick={() => setIsDialogOpen(false)} variant="outline">
            Close
          </Button>
      );
    }

    switch (state) {
      case "prompting":
        if (type === "image") {
        return (
          <>
            <Button onClick={() => setIsDialogOpen(false)} variant="outline">
              Cancel
            </Button>
            <Button onClick={startCamera} size="lg">
              <Camera className="mr-2 h-4 w-4" />
              Start Camera
            </Button>
          </>
          );
        }
        return (
          <Button onClick={() => setIsDialogOpen(false)} variant="outline">
            Cancel
          </Button>
        );
      case "starting-camera":
        return (
          <>
            <Button onClick={() => setIsDialogOpen(false)} variant="outline">
              Cancel
            </Button>
            <Button disabled size="lg">
              <Loader2 className="mr-2 h-4 w-4 animate-spin" />
              Starting...
            </Button>
          </>
        );
      case "failed-attempt":
        return (
          <>
            <Button onClick={() => setIsDialogOpen(false)} variant="outline">
              Cancel
            </Button>
            <Button onClick={handleRetry} size="lg">
              Retry
            </Button>
          </>
        );
      case "countdown":
        return (
          <p className="text-muted-foreground w-full text-center text-sm">
            Verification in progress...
          </p>
        );
      case "needs-download":
        return (
            <Button onClick={() => setIsDialogOpen(false)} variant="outline">
              Close
            </Button>
        );
      default:
        return null;
    }
  };

  // Render challenge content based on type
  const renderChallengeContent = () => {
    if (type === "text") {
      return (
        <TextInput
          question={challenge}
          onVerified={handleTextVerified}
          onVerificationFailed={handleTextVerificationFailed}
          onError={onError}
          onVerificationError={handleTextVerificationError}
          onAnswerSubmit={() => {
            // Clear error message when user submits a new answer
            setLastFailureReason(null);
            setStatusMessage(null);
          }}
          serverFallback={serverFallback}
          maxAttempts={MAX_ATTEMPTS}
        />
      );
    } else if (type === "audio") {
      return (
        <AudioCapture
          alphabetSequence={challenge}
          onVerified={handleAudioVerified}
          onVerificationFailed={handleAudioVerificationFailed}
          onError={onError}
          onVerificationError={handleVerificationError}
          isActive={isDialogOpen && (state === "prompting" || state === "failed-attempt")}
          serverFallback={serverFallback}
          maxAttempts={MAX_ATTEMPTS}
        />
      );
    } else {
      // Image type
      return (
        <>
          <Alert variant="default" className="w-full border-blue-200 bg-blue-50 text-left *:text-left dark:border-blue-800 dark:bg-blue-950">
            <Info className="h-4 w-4 text-blue-600 dark:text-blue-400" />
            <AlertTitle className="text-left font-semibold text-blue-800 dark:text-blue-200">
              Follow this instruction:
            </AlertTitle>
            <AlertDescription className="text-left text-blue-700 *:text-left dark:text-blue-300">
              {challenge}
            </AlertDescription>
          </Alert>

          <VideoCapture
            ref={videoCaptureRef}
            instruction={challenge}
            onVerified={handleImageVerified}
            onVerificationFailed={handleImageVerificationFailed}
            onError={onError}
            onVerificationError={handleVerificationError}
            isActive={isDialogOpen && (state === "prompting" || state === "starting-camera" || state === "countdown" || state === "failed-attempt")}
            serverFallback={serverFallback}
            maxAttempts={MAX_ATTEMPTS}
          />
        </>
      );
    }
  };

  // Component output: launch button, terminal-state alerts, and the
  // verification dialog (all mutually exclusive via `state` / `isDialogOpen`).
  return (
    <div
      className={className}
      style={{ width: "100%", maxWidth: "500px", margin: "0 auto" }}
    >
      {/* Launch button — hidden while the dialog is open or after a terminal state */}
      {(!isDialogOpen && state !== "success" && state !== "failed-final") && (
        <div className="space-y-4 text-center">
          <Button
            onClick={handleStart}
            disabled={state === "generating" || isGeneratingChallenge}
            size="lg"
            className="min-w-[200px]"
          >
            {state === "generating" || isGeneratingChallenge ? (
              <>
                <Loader2 className="mr-2 h-4 w-4 animate-spin" />
                Please wait...
              </>
            ) : (
              buttonText
            )}
          </Button>
        </div>
      )}

      {/* Terminal state: verification succeeded */}
      {state === "success" && (
        <Alert className="border-green-500 bg-green-50 dark:bg-green-950">
          <CheckCircle2 className="h-5 w-5 text-green-600 dark:text-green-400" />
          <AlertTitle className="text-lg font-semibold text-green-800 dark:text-green-200">
            Verification Successful!
          </AlertTitle>
          <AlertDescription className="mt-2 text-green-700 dark:text-green-300">
            You have been successfully verified as human.
          </AlertDescription>
        </Alert>
      )}

      {/* Terminal state: all attempts exhausted */}
      {state === "failed-final" && (
        <Alert variant="destructive">
          <XCircle className="h-5 w-5" />
          <AlertTitle className="text-lg font-semibold">
            Verification Failed
          </AlertTitle>
          <AlertDescription className="mt-2">
            <p>We could not verify you</p>
            {lastFailureReason && (
              <p className="mt-2 text-sm">
                <strong>Reason:</strong> {lastFailureReason}
              </p>
            )}
          </AlertDescription>
        </Alert>
      )}

      {/* Verification dialog: shows either the model-availability error or the
          status line + failure alert + challenge content */}
      <Dialog open={isDialogOpen} onOpenChange={handleOpenChange}>
        <DialogContent className="gap-0 p-0" showCloseButton={true}>
          <DialogHeader className="pt-6 pr-6 pb-4 pl-6 text-left">
            <DialogTitle className="text-left">Human Verification</DialogTitle>
          </DialogHeader>

          <div className="space-y-4 pr-6 pb-4 pl-6 text-left">
            {errorDialogMessage ? (
              <Alert variant="destructive" className="w-full text-left *:text-left">
                <XCircle className="h-4 w-4" />
                <AlertTitle className="text-left font-semibold">
                  Language Model Not Available
                </AlertTitle>
                <AlertDescription className="text-left *:text-left">
                  {errorDialogMessage}
                </AlertDescription>
              </Alert>
            ) : (
              <>
                <div className="text-muted-foreground flex h-6 items-center text-sm font-medium">
                  {renderStatus()}
                </div>

                {/* Show error message in separate section when verification fails */}
                {/* Keep error visible even when generating new question */}
                {lastFailureReason && (state === "failed-attempt" || state === "generating" || state === "prompting") && (
                  <Alert variant="destructive" className="w-full text-left *:text-left">
                    <XCircle className="h-4 w-4" />
                    <AlertTitle className="text-left font-semibold">
                      Verification Failed
                    </AlertTitle>
                    <AlertDescription className="text-left *:text-left">
                      {lastFailureReason}
                    </AlertDescription>
                  </Alert>
                )}

                {/* Model download prompt takes precedence over the challenge UI */}
                {state === "needs-download" ? (
                  <Alert variant="default" className="w-full border-amber-200 bg-amber-50 text-left *:text-left dark:border-amber-800 dark:bg-amber-950">
                    <Info className="h-4 w-4 text-amber-600 dark:text-amber-400" />
                    <AlertTitle className="text-left font-semibold text-amber-800 dark:text-amber-200">
                      Language Model Required
                    </AlertTitle>
                    <AlertDescription className="text-left text-amber-700 *:text-left dark:text-amber-300">
                      <p className="mb-2">
                        The language model needs to be downloaded to use human verification.
                      </p>
                      <p className="text-sm">
                        Please check the download widget to start the download. Once the model is downloaded, you can try again.
                      </p>
                    </AlertDescription>
                  </Alert>
                ) : (
                  renderChallengeContent()
                )}
              </>
            )}
          </div>

          {/* Footer action buttons (state-dependent) */}
          <div className="bg-muted/50 flex items-center justify-end gap-3 border-t pt-4 pr-6 pb-6 pl-6">
            {renderButtons()}
          </div>
        </DialogContent>
      </Dialog>
    </div>
  );
};

Component API