GenCN UI

Loader Message

GencnUI-powered loader message component with dynamic rotating text using tone, length, format, and rotation options.

Try out the component below to see how it automatically generates and rotates through multiple message variations.

Loading preview...
"use client";

import { GencnUILoaderMessage } from "@/registry/new-york/gencn-ui/items/loader-message/gencn-ui-loader-message";
import { Button } from "@/components/ui/button";
import {
  AlertDialog,
  AlertDialogCancel,
  AlertDialogContent,
  AlertDialogDescription,
  AlertDialogFooter,
  AlertDialogHeader,
  AlertDialogTitle,
  AlertDialogTrigger,
} from "@/components/ui/alert-dialog";
import { Spinner } from "@/components/ui/spinner";
import * as React from "react";

/**
 * Demo for GencnUILoaderMessage: opens an alert dialog containing a spinner
 * plus an AI-generated rotating loader message (3 "funny" variations,
 * rotating every 3 seconds and looping back to the first).
 */
export function GencnUILoaderMessageExample() {
  return (
    <div className="space-y-4">
      <AlertDialog>
        <AlertDialogTrigger asChild>
          <Button variant="outline">Show Loader Message</Button>
        </AlertDialogTrigger>

        <AlertDialogContent>
          <AlertDialogHeader>
            <AlertDialogTitle>Submitting form data (example)</AlertDialogTitle>
          </AlertDialogHeader>

          {/* Spinner and rotating message rendered inline in the description. */}
          <AlertDialogDescription>
            <span className="inline-flex items-center gap-2">
              <Spinner className="size-4" />
              <GencnUILoaderMessage
                text="Your request is being processed..."
                tone="funny"
                numberOfMessages={3}
                duration={3000}
                loopBack
              />
            </span>
          </AlertDialogDescription>

          <AlertDialogFooter>
            <AlertDialogCancel>Cancel</AlertDialogCancel>
          </AlertDialogFooter>
        </AlertDialogContent>
      </AlertDialog>
    </div>
  );
}

Server API

Path:
/api/structured
Source:
import { google, createGoogleGenerativeAI } from "@ai-sdk/google";
import { generateObject, streamObject } from "ai";
import { z } from "zod";

// Allow streaming responses up to 30 seconds
// (Next.js route-segment config: caps serverless function execution time.)
export const maxDuration = 30;

// Shape of the JSON body accepted by POST /api/structured.
interface StructuredRequest {
  prompt: string; // user prompt forwarded to the model
  schema: Record<string, unknown>; // JSON Schema describing the expected output
  omitResponseConstraintInput?: boolean; // NOTE(review): accepted but not read by this route — confirm if intentional
  streaming?: boolean; // when true, stream partial objects; otherwise return the complete object
  LLM_API_KEY?: string; // optional per-request Google API key (overrides the server default)
}

// Convert a (subset of) JSON Schema into an equivalent Zod schema.
//
// Supported: object (properties/required), array (items/minItems/maxItems),
// string (minLength/maxLength/enum), number & integer (minimum/maximum),
// boolean. Anything else falls back to z.any(). A `description` field, when
// present, is attached via .describe() so providers can surface it to the model.
function jsonSchemaToZod(schema: Record<string, unknown>): z.ZodTypeAny {
  if (!schema || typeof schema !== "object") {
    return z.any();
  }

  // Attach the JSON Schema `description` (if any) to the resulting Zod type.
  const withDescription = (zodType: z.ZodTypeAny): z.ZodTypeAny =>
    typeof schema.description === "string"
      ? zodType.describe(schema.description)
      : zodType;

  if (schema.type === "object") {
    const shape: Record<string, z.ZodTypeAny> = {};
    if (schema.properties) {
      for (const [key, prop] of Object.entries(
        schema.properties as Record<string, Record<string, unknown>>
      )) {
        const fieldSchema = jsonSchemaToZod(prop);
        // Fields not listed in the `required` array become optional.
        const isRequired =
          Array.isArray(schema.required) && schema.required.includes(key);
        shape[key] = isRequired ? fieldSchema : fieldSchema.optional();
      }
    }
    return withDescription(z.object(shape));
  } else if (schema.type === "array") {
    const itemSchema =
      schema.items && typeof schema.items === "object" && !Array.isArray(schema.items)
        ? jsonSchemaToZod(schema.items as Record<string, unknown>)
        : z.any();
    let arraySchema = z.array(itemSchema);

    // Handle minItems and maxItems
    if (typeof schema.minItems === "number") {
      arraySchema = arraySchema.min(schema.minItems) as z.ZodArray<z.ZodTypeAny>;
    }
    if (typeof schema.maxItems === "number") {
      arraySchema = arraySchema.max(schema.maxItems) as z.ZodArray<z.ZodTypeAny>;
    }

    return withDescription(arraySchema);
  } else if (schema.type === "string") {
    // Enums take precedence; length constraints do not apply to enum values.
    // Guard against an empty enum array: z.enum([]) throws at schema-build
    // time, so fall through to a plain string schema instead.
    if (Array.isArray(schema.enum) && schema.enum.length > 0) {
      return withDescription(z.enum(schema.enum as [string, ...string[]]));
    }
    let stringSchema = z.string();
    if (typeof schema.minLength === "number") {
      stringSchema = stringSchema.min(schema.minLength);
    }
    if (typeof schema.maxLength === "number") {
      stringSchema = stringSchema.max(schema.maxLength);
    }
    return withDescription(stringSchema);
  } else if (schema.type === "number") {
    let numberSchema = z.number();
    if (typeof schema.minimum === "number") {
      numberSchema = numberSchema.min(schema.minimum);
    }
    if (typeof schema.maximum === "number") {
      numberSchema = numberSchema.max(schema.maximum);
    }
    return withDescription(numberSchema);
  } else if (schema.type === "boolean") {
    return withDescription(z.boolean());
  } else if (schema.type === "integer") {
    let intSchema = z.number().int();
    if (typeof schema.minimum === "number") {
      intSchema = intSchema.min(schema.minimum);
    }
    if (typeof schema.maximum === "number") {
      intSchema = intSchema.max(schema.maximum);
    }
    return withDescription(intSchema);
  }

  // Unknown/missing `type` — accept anything rather than reject the payload.
  return withDescription(z.any());
}

// Default Gemini model used for structured generation (both paths).
const STRUCTURED_MODEL = "gemini-2.0-flash-exp";

// Build a JSON Response with the given status code.
function jsonResponse(body: unknown, status = 200): Response {
  return new Response(JSON.stringify(body), {
    status,
    headers: { "Content-Type": "application/json" },
  });
}

/**
 * POST /api/structured
 *
 * Accepts a StructuredRequest body and generates an object matching the
 * supplied JSON Schema via Google Gemini. When `streaming` is true, partial
 * objects are streamed back as text; otherwise the complete object is
 * returned as JSON. Returns 400 on missing prompt/schema, 500 on failure.
 */
export async function POST(req: Request) {
  try {
    const request: StructuredRequest = await req.json();
    const { prompt, schema, streaming = false, LLM_API_KEY } = request;

    if (!prompt) {
      return jsonResponse({ error: "Prompt is required" }, 400);
    }

    if (!schema) {
      return jsonResponse({ error: "Schema is required" }, 400);
    }

    // Create provider instance with manual API key if provided, otherwise use default
    const googleProvider = LLM_API_KEY
      ? createGoogleGenerativeAI({ apiKey: LLM_API_KEY })
      : google;

    // Convert JSON Schema to Zod schema
    const zodSchema = jsonSchemaToZod(schema);

    if (streaming) {
      const result = streamObject({
        model: googleProvider(STRUCTURED_MODEL),
        schema: zodSchema,
        prompt,
      });

      // Use toTextStreamResponse() for streaming structured objects
      // This is the standard way per Vercel AI SDK documentation
      return result.toTextStreamResponse();
    }

    const result = await generateObject({
      model: googleProvider(STRUCTURED_MODEL),
      schema: zodSchema,
      prompt,
    });

    return jsonResponse(result.object);
  } catch (error) {
    console.error("Structured API error:", error);
    // `error` is not guaranteed to be an Error instance (thrown strings or
    // provider error objects are possible) — extract the message defensively
    // instead of `(error as Error).message`, which could yield undefined.
    const message = error instanceof Error ? error.message : String(error);
    return jsonResponse({ error: "Internal server error", message }, 500);
  }
}

Installation

Setup Required: Make sure you have configured components.json first. See the installation guide for setup instructions.

npx shadcn@latest add @gencn-ui/gencn-ui-loader-message
"use client";
import { useState, useRef, useCallback, useEffect, useMemo } from "react";
import { useStructuredAPI } from "@/registry/new-york/gencn-ui/items/shared/hooks/internal/use-gencn-ui-structured-api";
import type { AITone } from "@/registry/new-york/gencn-ui/items/shared/gencn-ui-types";

/** Props for the GencnUILoaderMessage component. */
export interface GencnUILoaderMessageProps {
  /** Original loader text; also the fallback shown while/if generation fails. */
  text: string;
  /** Optional tone (e.g. "funny") used when rephrasing the text. */
  tone?: AITone;
  /** Desired relative length of the generated variations. Default: "as-is". */
  length?: "shorter" | "as-is" | "longer";
  /** Output format of the generated variations. Default: "as-is". */
  format?: "as-is" | "markdown" | "plain-text";
  /** Extra context appended to the generation prompt. */
  context?: string;
  /** How many variations to generate (clamped to 1–10 internally). Default: 3 */
  numberOfMessages?: number;
  /** Rotation interval in milliseconds. Default: 3000 */
  duration?: number;
  /** Whether to loop back to the first message after the last. Default: false */
  loopBack?: boolean;
}

/**
 * Renders a single line of loader text that rotates through AI-generated
 * variations of `text`.
 *
 * Lifecycle: on mount (client-side only) it builds a prompt from the props,
 * requests `numberOfMessages` unique rephrasings in one structured API call,
 * then cycles through them every `duration` ms. Until generation completes —
 * or if it fails — the original `text` is displayed as a fallback.
 */
export function GencnUILoaderMessage({
  text,
  tone,
  length = "as-is",
  format = "as-is",
  context,
  numberOfMessages = 3,
  duration = 3000,
  loopBack = false,
}: GencnUILoaderMessageProps) {
  // Clamp numberOfMessages to the range 1..10 (also guards 0/NaN via `|| 3`).
  const clampedNumberOfMessages = Math.min(
    Math.max(1, numberOfMessages || 3),
    10
  );

  // Generated variations; empty until the structured API call resolves.
  const [messages, setMessages] = useState<string[]>([]);
  // Index into `messages` currently being displayed.
  const [currentIndex, setCurrentIndex] = useState<number>(0);
  // NOTE(review): isGenerating is tracked but not read by the rendered
  // output below — confirm whether it is intended for future UI states.
  const [isGenerating, setIsGenerating] = useState<boolean>(false);
  // Guards against running generation during SSR / before hydration.
  const [isMounted, setIsMounted] = useState<boolean>(false);
  // Handle for the rotation timer so it can be cleared on change/unmount.
  const intervalRef = useRef<NodeJS.Timeout | null>(null);
  // Controller for the in-flight generation; see NOTE(review) below.
  const abortRef = useRef<AbortController | null>(null);
  // Ref mirror of isGenerating, used to block concurrent generations without
  // waiting for a state re-render.
  const isGeneratingRef = useRef<boolean>(false);
  // Cache key of the last completed generation, used to skip redundant calls.
  const lastGeneratedKeyRef = useRef<string | null>(null);

  // JSON schema for the structured response: exactly N objects, each with a
  // non-empty `text` variation. Memoized so the hook below sees a stable ref.
  const schema = useMemo(() => {
    return {
      type: "object",
      properties: {
        messages: {
          type: "array",
          minItems: clampedNumberOfMessages,
          maxItems: clampedNumberOfMessages,
          items: {
            type: "object",
            properties: {
              text: {
                type: "string",
                description: "A unique variation of the original message text",
              },
            },
            required: ["text"],
          },
        },
      },
      required: ["messages"],
    };
  }, [clampedNumberOfMessages]);

  // Structured-generation hook; falls back to the server route when the
  // in-browser path is unavailable.
  const structuredAPI = useStructuredAPI({
    schema,
    omitResponseConstraintInput: true,
    serverFallback: {
      api: "/api/structured",
    },
  });

  // Mark component as mounted (client-side only)
  useEffect(() => {
    setIsMounted(true);
  }, []);

  // Build the generation prompt from tone/length/format/context plus fixed
  // requirements (uniqueness, same intent, loader-appropriate phrasing).
  const buildPrompt = useCallback(() => {
    const instructions: string[] = [];

    // Use sonner's tone instruction style
    if (tone) {
      instructions.push(
        `Rephrase the following text in a ${tone} tone while preserving intent.`
      );
    } else {
      instructions.push(
        "Rephrase the following text to improve clarity while preserving intent."
      );
    }

    if (length !== "as-is") {
      instructions.push(`Length: ${length}`);
    }
    if (format !== "as-is") {
      instructions.push(`Format: ${format}`);
    }

    const baseInstruction = instructions.join(" ");
    const contextText = context ? `\n\nAdditional context: ${context}` : "";

    return `Original text: "${text}"

${baseInstruction}${contextText}

Generate exactly ${clampedNumberOfMessages} unique variations of this text. Each variation must be:
- A complete, independent message
- Distinctly different from all other variations
- Using different words, phrasing, and sentence structure
- Maintaining the same core meaning and intent
- Appropriate for use as a loading/processing message

Return a JSON object with a "messages" array, where each message has a "text" property containing one unique variation.`;
  }, [text, tone, length, format, context, clampedNumberOfMessages]);

  // Generate all messages upfront using a single structured API call.
  // Deduplicated by `generationKey`; falls back to [text] on any failure.
  const generateMessages = useCallback(async () => {
    // Skip before hydration, for blank text, or while a call is in flight.
    if (!isMounted || !text.trim() || isGeneratingRef.current) {
      return;
    }

    // Create a unique key for this generation request
    const generationKey = `${text}|${tone}|${length}|${format}|${context || ""}|${clampedNumberOfMessages}`;

    // If we've already generated for this exact configuration, don't regenerate
    if (lastGeneratedKeyRef.current === generationKey && messages.length > 0) {
      return;
    }

    isGeneratingRef.current = true;
    setIsGenerating(true);
    // NOTE(review): this controller is only consulted via `signal.aborted`
    // checks below — it is never passed to structuredAPI.prompt, so aborting
    // discards the result but does not cancel the underlying request. Confirm
    // whether the hook accepts a signal.
    const controller = new AbortController();
    abortRef.current = controller;

    try {
      const promptText = buildPrompt();

      // Generate all message variations in a single structured API call
      const result = await structuredAPI.prompt(promptText);

      // Extract messages from the structured response
      if (result && result.messages && Array.isArray(result.messages)) {
        // Keep only entries with a non-empty string `text`, trimmed and
        // capped at the requested count.
        const validMessages: string[] = result.messages
          .filter(
            (m: any) => m && typeof m.text === "string" && m.text.trim() !== ""
          )
          .map((m: any) => m.text.trim())
          .slice(0, clampedNumberOfMessages);

        // Remove duplicates (though structured API should generate unique ones)
        const uniqueMessages = Array.from(new Set(validMessages));

        if (!controller.signal.aborted && uniqueMessages.length > 0) {
          setMessages(uniqueMessages);
          setCurrentIndex(0);
          lastGeneratedKeyRef.current = generationKey;
        } else if (!controller.signal.aborted) {
          // If all messages failed, keep original text as fallback
          setMessages([text]);
          setCurrentIndex(0);
          lastGeneratedKeyRef.current = generationKey;
        }
      } else {
        // Invalid response format, fallback to original text
        if (!controller.signal.aborted) {
          setMessages([text]);
          setCurrentIndex(0);
          lastGeneratedKeyRef.current = generationKey;
        }
      }
    } catch (e) {
      // On error, fallback to original text
      console.warn("[LoaderMessage] Failed to generate messages:", e);
      if (!controller.signal.aborted) {
        setMessages([text]);
        setCurrentIndex(0);
        lastGeneratedKeyRef.current = generationKey;
      }
    } finally {
      // When aborted, the reset effect below has already cleared these flags;
      // only clear them here for a generation that ran to completion.
      if (!controller.signal.aborted) {
        setIsGenerating(false);
        isGeneratingRef.current = false;
      }
      // Release the controller unless a newer generation has replaced it.
      if (abortRef.current === controller) {
        abortRef.current = null;
      }
    }
  }, [
    text,
    tone,
    length,
    format,
    context,
    clampedNumberOfMessages,
    structuredAPI,
    isMounted,
    buildPrompt,
  ]);

  // Reset state when text or options change. duration/loopBack are in the
  // dependency list but not in the key, so changing only those re-runs the
  // effect without triggering a reset.
  useEffect(() => {
    const generationKey = `${text}|${tone}|${length}|${format}|${context || ""}|${clampedNumberOfMessages}`;
    // Only reset if the key actually changed
    if (lastGeneratedKeyRef.current !== generationKey) {
      setMessages([]);
      setCurrentIndex(0);
      setIsGenerating(false);
      isGeneratingRef.current = false;
      lastGeneratedKeyRef.current = null;
      if (intervalRef.current) {
        clearInterval(intervalRef.current);
        intervalRef.current = null;
      }
      if (abortRef.current) {
        abortRef.current.abort();
        abortRef.current = null;
      }
    }
  }, [
    text,
    tone,
    length,
    format,
    context,
    clampedNumberOfMessages,
    duration,
    loopBack,
  ]);

  // Generate messages when component mounts or when generation key changes
  useEffect(() => {
    if (isMounted && text.trim()) {
      const generationKey = `${text}|${tone}|${length}|${format}|${context || ""}|${clampedNumberOfMessages}`;
      // Only generate if we haven't generated for this key yet
      if (lastGeneratedKeyRef.current !== generationKey) {
        void generateMessages();
      }
    }
  }, [
    isMounted,
    text,
    tone,
    length,
    format,
    context,
    clampedNumberOfMessages,
    generateMessages,
  ]);

  // Set up rotation interval
  useEffect(() => {
    // Only start rotation if we have messages and more than one message
    if (messages.length <= 1) {
      return;
    }

    // Clear any existing interval
    if (intervalRef.current) {
      clearInterval(intervalRef.current);
    }

    // Set up rotation interval
    intervalRef.current = setInterval(() => {
      setCurrentIndex((prevIndex) => {
        const nextIndex = prevIndex + 1;

        // If we've reached the last message
        if (nextIndex >= messages.length) {
          if (loopBack) {
            // Loop back to first message
            return 0;
          } else {
            // Stay on last message
            return prevIndex;
          }
        }

        return nextIndex;
      });
    }, duration);

    // Cleanup interval on unmount or when dependencies change
    return () => {
      if (intervalRef.current) {
        clearInterval(intervalRef.current);
        intervalRef.current = null;
      }
    };
  }, [messages, duration, loopBack]);

  // Determine what to display
  // Show existing messages if available, even while generating new ones
  // Only show original text if we have no messages at all
  const displayText =
    messages.length > 0
      ? (messages[currentIndex] ?? messages[0] ?? text)
      : text; // Fallback to original text only if no messages exist

  return (
    <span className="inline-flex items-center">
      <span>{displayText}</span>
    </span>
  );
}

Component API

Usage Patterns

The component automatically generates multiple variations of the original text using the structured generation API. It shows the original text while generating, then rotates through the generated messages at the specified interval. This provides dynamic, engaging loader text for end users.

Key Features

  • Automatic Generation: Generates all message variations upfront in a single structured API call for optimal performance
  • Error Handling: Automatically skips messages that fail to generate, ensuring smooth rotation
  • Flexible Rotation: Configure rotation duration and whether to loop back to the first message
  • Graceful Fallback: Falls back to original text if all message generation fails

Hybrid LLM Support

This component automatically uses the Chrome Rewriter API when available, and falls back to server-side LLM rewriting when the Rewriter API is not supported or unavailable. This ensures rewriting works in all browsers and environments.