GenCN UI

Chat

An extended Vercel AI SDK React hook that supports on-device Chrome AI and server LLMs while keeping the same interface and functionality as @ai-sdk/react.

Try out the hook below to see how it seamlessly switches between local Chrome AI and remote API endpoints while maintaining full compatibility with the Vercel AI SDK.

"use client";

import * as React from "react";
import { useState, useEffect, useRef } from "react";
import { useGencnUIChat } from "@/registry/new-york/gencn-ui/items/shared/hooks/use-gencn-ui-chat";
import { useGencnUI } from "@/registry/new-york/gencn-ui/items/shared/hooks/use-gencn-ui";
import { Button } from "@/components/ui/button";
import { Textarea } from "@/components/ui/textarea";
import {
  Card,
  CardContent,
  CardDescription,
  CardHeader,
  CardTitle,
} from "@/components/ui/card";
import { Badge } from "@/components/ui/badge";
import { Spinner } from "@/components/ui/spinner";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Input } from "@/components/ui/input";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select";
import { Wifi, WifiOff, Zap, AlertCircle } from "lucide-react";

export function UseGencnUIChatExample() {
  const [input, setInput] = useState("");

  const {
    messages,
    sendMessage,
    status,
    transportMode,
    isLocalSupported,
    localAvailability,
    setTransportMode,
    error,
    clearError,
  } = useGencnUIChat({
    autoUseLocalIfAvailable: true,
  });

  const {
    enableServerLLM,
    dangerouslySetServerLLMKey,
    serverKey,
    serverModel,
    setServerModel,
    setServerKey,
  } = useGencnUI();

  // Clear error when API key changes (user might have fixed it)
  const prevServerKeyRef = useRef(serverKey);
  useEffect(() => {
    if (prevServerKeyRef.current !== serverKey && error) {
      clearError();
    }
    prevServerKeyRef.current = serverKey;
  }, [serverKey, error, clearError]);

  const handleSubmit = (e: React.FormEvent | React.KeyboardEvent) => {
    e.preventDefault();
    if (!input.trim()) return;

    // Clear any previous errors when attempting to send a new message
    if (error) {
      clearError();
    }

    // Only prevent sending if currently submitting or streaming (not if in error state)
    if (status === "submitted" || status === "streaming") return;

    sendMessage({ text: input }, { body: { LLM_API_KEY: serverKey } });
    setInput("");
  };

  const isLoading = status === "submitted" || status === "streaming";

  return (
    <Card className="max-w-2xl">
      <CardHeader>
        <CardTitle className="flex items-center gap-2">
          <Zap className="h-5 w-5" />
          Hybrid LLM Chat
        </CardTitle>
        <CardDescription>
          Chat using local Chrome AI or remote server with automatic fallback
        </CardDescription>
      </CardHeader>
      <CardContent className="space-y-4">
        {/* Transport Mode Controls */}
        <div className="bg-muted/50 flex items-center justify-between rounded-lg border p-3">
          <div className="flex items-center gap-2">
            <span className="text-sm font-medium">Transport Mode:</span>
            <Badge
              variant={transportMode === "local" ? "default" : "secondary"}
            >
              {transportMode === "local" ? (
                <span className="flex items-center gap-1">
                  <Wifi className="h-3 w-3" />
                  Local
                </span>
              ) : (
                <span className="flex items-center gap-1">
                  <WifiOff className="h-3 w-3" />
                  Remote
                </span>
              )}
            </Badge>
          </div>
          <div className="flex gap-2">
            {isLocalSupported &&
              transportMode === "remote" &&
              localAvailability !== "unavailable" && (
                <Button
                  size="sm"
                  variant="outline"
                  onClick={() => setTransportMode("local")}
                  disabled={localAvailability === "checking"}
                >
                  Switch to Local
                </Button>
              )}
            {transportMode === "local" && (
              <Button
                size="sm"
                variant="outline"
                onClick={() => setTransportMode("remote")}
              >
                Switch to Remote
              </Button>
            )}
          </div>
        </div>

        {/* Availability Status */}
        {localAvailability === "checking" && (
          <Alert>
            <Spinner className="mr-2 h-4 w-4" />
            <AlertDescription>
              Checking local AI availability...
            </AlertDescription>
          </Alert>
        )}
        {localAvailability === "available" && transportMode === "local" && (
          <Alert className="border-green-500 bg-green-50 dark:bg-green-950">
            <AlertDescription className="text-green-700 dark:text-green-300">
              ✓ Local AI is available and ready
            </AlertDescription>
          </Alert>
        )}
        {localAvailability === "downloadable" && transportMode === "local" && (
          <Alert className="border-yellow-500 bg-yellow-50 dark:bg-yellow-950">
            <AlertDescription className="text-yellow-700 dark:text-yellow-300">
              ⚠ Local AI model needs to be downloaded. First message will
              trigger download.
            </AlertDescription>
          </Alert>
        )}
        {localAvailability === "unavailable" && (
          <Alert className="border-orange-500 bg-orange-50 dark:bg-orange-950">
            <AlertDescription className="text-orange-700 dark:text-orange-300">
              Local AI is not available, using remote server
            </AlertDescription>
          </Alert>
        )}

        {/* Error Display */}
        {error && (
          <Alert variant="destructive">
            <AlertDescription>{error.message}</AlertDescription>
          </Alert>
        )}

        {/* Messages */}
        <div className="bg-muted/30 max-h-96 space-y-2 overflow-y-auto rounded-lg border p-4">
          {(!messages || messages.length === 0) && (
            <div className="text-muted-foreground py-8 text-center">
              <p className="text-sm">Start a conversation...</p>
              <p className="mt-1 text-xs">
                Try asking a question or having a chat!
              </p>
            </div>
          )}
          {messages?.map((message) => (
            <div
              key={message.id}
              className={`rounded-lg p-3 ${
                message.role === "user"
                  ? "bg-muted text-muted-foreground border-primary/80 ml-auto max-w-[85%] border"
                  : "bg-muted mr-auto max-w-[85%]"
              }`}
            >
              <div className="flex items-start gap-2">
                <div className="flex-1">
                  <p className="mb-0! text-sm wrap-break-word whitespace-pre-wrap">
                    {message.parts?.map((part, index) => {
                      if (part.type === "text") {
                        return <span key={index}>{part.text.trim()}</span>;
                      }
                      return null;
                    })}
                  </p>
                </div>
              </div>
            </div>
          ))}
          {isLoading && (
            <div className="text-muted-foreground flex items-center gap-2">
              <Spinner className="h-4 w-4" />
              <span className="text-sm">Thinking...</span>
            </div>
          )}
        </div>

        {/* Server LLM Configuration (shown when in remote mode) */}
        {transportMode === "remote" &&
          enableServerLLM &&
          dangerouslySetServerLLMKey && (
            <div className="bg-muted/30 space-y-2 rounded-lg border p-3">
              <div className="flex items-center gap-2">
                <span className="text-sm font-medium">
                  Server LLM Configuration:
                </span>
              </div>
              <div className="flex items-center gap-2">
                {serverModel && (
                  <Select
                    value={serverModel.modelCode}
                    onValueChange={(value) => {
                      setServerModel({
                        modelCode: value,
                        modelLabel: serverModel.modelLabel,
                      });
                    }}
                  >
                    <SelectTrigger className="w-[180px]">
                      <SelectValue placeholder="Select model" />
                    </SelectTrigger>
                    <SelectContent>
                      <SelectItem
                        key={serverModel.modelCode}
                        value={serverModel.modelCode}
                      >
                        {serverModel.modelLabel}
                      </SelectItem>
                    </SelectContent>
                  </Select>
                )}
                <Input
                  type="text"
                  placeholder="Server Key"
                  value={serverKey}
                  onChange={(e) => setServerKey(e.target.value)}
                  className="flex-1"
                />
              </div>
              <Alert>
                <AlertCircle className="size-4" />
                <AlertDescription className="text-destructive text-xs">
                  This key is never stored on the server; it is used purely to
                  showcase hybrid LLM in unsupported browsers or in cases where
                  the server LLM API is required. In production, store the key
                  directly on the server instead.
                </AlertDescription>
              </Alert>
            </div>
          )}

        {/* Input Form */}
        <form onSubmit={handleSubmit} className="space-y-2">
          <Textarea
            value={input}
            onChange={(e) => setInput(e.target.value)}
            onKeyDown={(e) => {
              if (e.key === "Enter" && !e.shiftKey) {
                e.preventDefault();
                handleSubmit(e);
              }
            }}
            placeholder="Type your message..."
            rows={3}
            disabled={isLoading}
            className="resize-none"
          />
          <Button
            type="submit"
            className="w-full"
            disabled={isLoading || !input.trim()}
          >
            {isLoading ? (
              <>
                <Spinner className="mr-2 h-4 w-4" />
                Sending...
              </>
            ) : (
              "Send Message"
            )}
          </Button>
        </form>
      </CardContent>
    </Card>
  );
}

Server API

Path: /api/chat

Source:
import { google, createGoogleGenerativeAI } from "@ai-sdk/google";
import { convertToModelMessages, streamText, UIMessage } from "ai";

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

export async function POST(req: Request) {
  const {
    messages,
    system,
    LLM_API_KEY,
  }: { messages: UIMessage[]; system?: string; LLM_API_KEY?: string } =
    await req.json();

  // Create provider instance with manual API key if provided, otherwise use default
  const googleProvider = LLM_API_KEY
    ? createGoogleGenerativeAI({ apiKey: LLM_API_KEY })
    : google;

  const result = streamText({
    model: googleProvider("gemini-2.0-flash-exp"),
    system: system || "You are a helpful assistant.",
    maxOutputTokens: 1000,
    messages: convertToModelMessages(messages),
  });

  return result.toUIMessageStreamResponse();
}
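
For reference, a request against this route looks roughly like the sketch below. The hook's remote transport normally issues it for you via DefaultChatTransport; the message shape follows the AI SDK's UIMessage format, and LLM_API_KEY is the optional per-request key passed from the demo above (the key value here is a placeholder):

// Hedged sketch of a manual call to the route above; the hook's remote
// transport performs the equivalent request automatically.
const res = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [
      { id: "1", role: "user", parts: [{ type: "text", text: "Hello!" }] },
    ],
    system: "You are a helpful assistant.",
    // Optional: omit to fall back to the key configured on the server.
    LLM_API_KEY: "your-google-api-key",
  }),
});
// The response is a UI message stream (see toUIMessageStreamResponse above).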

Overview

The use-gencn-ui-chat hook seamlessly integrates with the Vercel AI SDK and extends the useChat hook to provide a hybrid AI chat experience. It automatically switches between local Chrome AI (using the LanguageModel API) and remote API endpoints while maintaining full compatibility with Vercel AI SDK's text chat message format and APIs.

This hook is built on top of useChat from @ai-sdk/react, so it returns all the same properties and methods you're familiar with, plus additional functionality for transport switching. It intelligently detects browser support, checks model availability, and preserves message history when switching between transport modes.
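
To make the detection step concrete, the sketch below shows the kind of availability check LocalChatTransport.checkAvailability() performs against Chrome's built-in Prompt API. It is an illustration assuming Chrome's experimental LanguageModel global, not the transport's exact code:

type LocalAvailability = "available" | "downloadable" | "unavailable";

// Hedged sketch: assumes Chrome's experimental Prompt API global.
// The real logic lives in LocalChatTransport.checkAvailability().
async function checkLocalAvailability(): Promise<LocalAvailability> {
  // Feature-detect the Prompt API before calling into it.
  const LanguageModel = (
    globalThis as { LanguageModel?: { availability(): Promise<string> } }
  ).LanguageModel;
  if (!LanguageModel) return "unavailable";

  // availability() reports whether the on-device model is ready,
  // needs a download, or cannot run on this device.
  const availability = await LanguageModel.availability();
  if (availability === "available") return "available";
  if (availability === "downloadable" || availability === "downloading")
    return "downloadable";
  return "unavailable";
}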

Installation

Setup Required: Make sure you have configured components.json first. See the installation guide for setup instructions.

npx shadcn@latest add @gencn-ui/use-gencn-ui-chat
"use client";

import {
  useChat,
  type UseChatHelpers,
  type UseChatOptions,
} from "@ai-sdk/react";
import { DefaultChatTransport, type UIMessage } from "ai";
import { useEffect, useMemo, useRef, useState } from "react";
import {
  LocalChatTransport,
  type LocalChatTransportOptions,
} from "@/registry/new-york/gencn-ui/items/shared/lib/gencn-ui-local-chat-transport";

export type TransportMode = "local" | "remote";

export interface UseGencnUIChatOptions
  extends Omit<UseChatOptions<UIMessage>, "transport" | "id"> {
  /**
   * Initial transport mode. Defaults to 'remote' if local is not available.
   */
  initialMode?: TransportMode;
  /**
   * Options for local transport when using Chrome LanguageModel API
   */
  localTransportOptions?: LocalChatTransportOptions;
  /**
   * API endpoint for remote transport. Defaults to '/api/chat'
   */
  remoteApiEndpoint?: string;
  /**
   * Whether to automatically use local transport if available
   */
  autoUseLocalIfAvailable?: boolean;
  /**
   * Chat ID prefix. The actual ID will be prefixed with the transport mode.
   */
  chatIdPrefix?: string;
}

export interface UseGencnUIChatReturn extends UseChatHelpers<UIMessage> {
  /**
   * Current transport mode ('local' or 'remote')
   */
  transportMode: TransportMode;
  /**
   * Whether local LLM is supported in this browser
   */
  isLocalSupported: boolean;
  /**
   * Whether local LLM is available (may need download)
   */
  localAvailability: "available" | "downloadable" | "unavailable" | "checking";
  /**
   * Switch to a different transport mode
   */
  setTransportMode: (mode: TransportMode) => void;
  /**
   * Current transport instance
   */
  transport: LocalChatTransport | DefaultChatTransport<UIMessage>;
}

export function useGencnUIChat(
  options: UseGencnUIChatOptions = {}
): UseGencnUIChatReturn {
  const {
    initialMode,
    localTransportOptions = {},
    remoteApiEndpoint = "/api/chat",
    autoUseLocalIfAvailable = false,
    chatIdPrefix = "chat",
    ...useChatOptions
  } = options;

  // State for transport mode
  const [transportMode, setTransportModeState] = useState<TransportMode>(() => {
    // We'll check support and set mode after mount to avoid hydration mismatch
    return initialMode || "remote";
  });

  // State for local LLM support and availability
  const [isLocalSupported, setIsLocalSupported] = useState(false);
  const [localAvailability, setLocalAvailability] = useState<
    "available" | "downloadable" | "unavailable" | "checking"
  >("checking");

  // Preserve messages across transport switches
  const preservedMessagesRef = useRef<UIMessage[]>([]);
  const prevTransportModeRef = useRef<TransportMode>(transportMode);
  const prevTransportModeForErrorRef = useRef<TransportMode>(transportMode);

  // Check local LLM support and availability after mount (client-only)
  useEffect(() => {
    const checkLocalSupport = async () => {
      const supported = LocalChatTransport.isSupported();
      setIsLocalSupported(supported);

      if (supported) {
        setLocalAvailability("checking");
        try {
          const availability = await LocalChatTransport.checkAvailability({
            expectedInputs: localTransportOptions.expectedInputs,
            expectedOutputs: localTransportOptions.expectedOutputs,
          });
          setLocalAvailability(availability);

          // Auto-enable local if available and autoUseLocalIfAvailable is true
          if (
            autoUseLocalIfAvailable &&
            availability !== "unavailable" &&
            initialMode !== "remote"
          ) {
            setTransportModeState("local");
          }
        } catch {
          setLocalAvailability("unavailable");
        }
      } else {
        setLocalAvailability("unavailable");
      }
    };

    checkLocalSupport();
  }, [
    autoUseLocalIfAvailable,
    initialMode,
    localTransportOptions.expectedInputs,
    localTransportOptions.expectedOutputs,
  ]);

  // Determine which transport to use
  const shouldUseLocal = useMemo(() => {
    return (
      transportMode === "local" &&
      isLocalSupported &&
      localAvailability !== "unavailable"
    );
  }, [transportMode, isLocalSupported, localAvailability]);

  // Create transport based on mode
  const transport = useMemo(() => {
    if (shouldUseLocal) {
      return new LocalChatTransport({
        system: "You are a helpful assistant.",
        temperature: 1.0,
        ...localTransportOptions,
      });
    } else {
      return new DefaultChatTransport({
        api: remoteApiEndpoint,
      });
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [shouldUseLocal, remoteApiEndpoint]);

  // Generate unique chat ID based on transport mode
  const chatId = useMemo(() => {
    return `${chatIdPrefix}-${transportMode}`;
  }, [chatIdPrefix, transportMode]);

  // Use the useChat hook with the appropriate transport
  const chatHelpers = useChat({
    ...useChatOptions,
    transport,
    id: chatId,
  });

  // Preserve messages before switching transports
  useEffect(() => {
    if (chatHelpers.messages.length > 0) {
      preservedMessagesRef.current = chatHelpers.messages;
    }
  }, [chatHelpers.messages]);

  // Restore messages when switching transports
  useEffect(() => {
    // Only restore if we just switched transports and messages were cleared
    if (
      prevTransportModeRef.current !== transportMode &&
      preservedMessagesRef.current.length > 0 &&
      chatHelpers.messages.length === 0 &&
      chatHelpers.status === "ready"
    ) {
      chatHelpers.setMessages(preservedMessagesRef.current);
    }
    prevTransportModeRef.current = transportMode;
  }, [
    transportMode,
    chatHelpers.messages.length,
    chatHelpers.status,
    chatHelpers.setMessages,
  ]);

  // Clear error when transport mode changes (user might have fixed the issue)
  useEffect(() => {
    if (
      prevTransportModeForErrorRef.current !== transportMode &&
      chatHelpers.error
    ) {
      chatHelpers.clearError();
    }
    prevTransportModeForErrorRef.current = transportMode;
  }, [transportMode, chatHelpers.error, chatHelpers.clearError]);

  // Function to switch transport mode
  const setTransportMode = (mode: TransportMode) => {
    // Only allow switching if not currently streaming
    if (
      chatHelpers.status !== "submitted" &&
      chatHelpers.status !== "streaming"
    ) {
      // Preserve current messages before switching
      if (chatHelpers.messages.length > 0) {
        preservedMessagesRef.current = chatHelpers.messages;
      }
      setTransportModeState(mode);
    }
  };

  return {
    ...chatHelpers,
    transportMode,
    isLocalSupported,
    localAvailability,
    setTransportMode,
    transport,
  };
}

Vercel AI SDK Integration

This hook is a drop-in replacement for useChat from @ai-sdk/react. It seamlessly integrates with the Vercel AI SDK and provides all the same functionality:

  • Full useChat API: Returns all properties and methods from useChat including messages, sendMessage, status, error, stop, clearError, and more
  • Text Chat Messages: Fully compatible with Vercel AI SDK's UIMessage format for text chat messages
  • Streaming Support: Supports streaming responses just like useChat
  • Transport Abstraction: Automatically manages transport switching between local and remote without changing your code

You can use this hook anywhere you would use useChat; it maintains the exact same interface while adding hybrid transport capabilities.
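
As a minimal drop-in sketch (the component and markup here are illustrative, not part of the registry):

"use client";

import { useGencnUIChat } from "@/registry/new-york/gencn-ui/items/shared/hooks/use-gencn-ui-chat";

// Illustrative drop-in usage: the same call sites you would write with
// useChat from @ai-sdk/react, plus transport switching on top.
export function DropInExample() {
  const { messages, sendMessage, status, transportMode, setTransportMode } =
    useGencnUIChat({ remoteApiEndpoint: "/api/chat" });

  return (
    <div>
      <p>Transport: {transportMode}</p>
      <button
        onClick={() =>
          setTransportMode(transportMode === "local" ? "remote" : "local")
        }
      >
        Toggle transport
      </button>
      <button
        disabled={status === "submitted" || status === "streaming"}
        onClick={() => sendMessage({ text: "Hello!" })}
      >
        Say hello
      </button>
      <ul>
        {messages.map((m) => (
          <li key={m.id}>
            {m.role}:{" "}
            {m.parts.map((part, i) =>
              part.type === "text" ? <span key={i}>{part.text}</span> : null
            )}
          </li>
        ))}
      </ul>
    </div>
  );
}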

Hooks API