Fszontagh hace 3 meses
padre
commit
83da0a0e3f

+ 1 - 0
include/server_config.h

@@ -53,6 +53,7 @@ struct ServerConfig {
     std::string loraDir = "";           // LoRA directory (default: loras)
     std::string taesdDir = "";          // TAESD directory (default: TAESD)
     std::string vaeDir = "";            // VAE directory (default: vae)
+    std::string diffusionModelsDir = "";      // Diffusion models directory (default: diffusion_models)
 
     // Queue and output directories
     std::string queueDir = "./queue";        // Directory to store queue job files

+ 0 - 69
test_subfolder_scanning.sh

@@ -1,69 +0,0 @@
-#!/bin/bash
-
-# Test script for subfolder model scanning
-# This script creates a test directory structure and verifies that subfolder models are detected
-
-set -e
-
-TEST_BASE="/tmp/sd_test_models"
-TEST_CHECKPOINTS="$TEST_BASE/checkpoints"
-
-echo "Setting up test environment..."
-
-# Clean up previous test
-rm -rf "$TEST_BASE"
-
-# Create test directory structure
-mkdir -p "$TEST_CHECKPOINTS/subfolder1"
-mkdir -p "$TEST_CHECKPOINTS/subfolder2/deep"
-mkdir -p "$TEST_CHECKPOINTS/subfolder2/nested"
-
-# Create dummy model files
-touch "$TEST_CHECKPOINTS/model1.safetensors"
-touch "$TEST_CHECKPOINTS/subfolder1/model2.safetensors"
-touch "$TEST_CHECKPOINTS/subfolder2/model3.safetensors"
-touch "$TEST_CHECKPOINTS/subfolder2/deep/model4.safetensors"
-touch "$TEST_CHECKPOINTS/subfolder2/nested/model5.safetensors"
-
-# Create other model types
-mkdir -p "$TEST_BASE/loras"
-touch "$TEST_BASE/loras/lora1.safetensors"
-mkdir -p "$TEST_BASE/loras/subfolder"
-touch "$TEST_BASE/loras/subfolder/lora2.safetensors"
-
-echo "Test directory structure created:"
-echo "- $TEST_CHECKPOINTS/model1.safetensors"
-echo "- $TEST_CHECKPOINTS/subfolder1/model2.safetensors"
-echo "- $TEST_CHECKPOINTS/subfolder2/model3.safetensors"
-echo "- $TEST_CHECKPOINTS/subfolder2/deep/model4.safetensors"
-echo "- $TEST_CHECKPOINTS/subfolder2/nested/model5.safetensors"
-echo "- $TEST_BASE/loras/lora1.safetensors"
-echo "- $TEST_BASE/loras/subfolder/lora2.safetensors"
-
-echo ""
-echo "Running model scanning test..."
-
-# Run the server with test directory
-cd /data/stable-diffusion.cpp-rest/build
-timeout 10s ./stable-diffusion-rest-server \
-    --models-dir "$TEST_BASE" \
-    --checkpoints "$TEST_CHECKPOINTS" \
-    --lora-dir "$TEST_BASE/loras" \
-    --host 127.0.0.1 \
-    --port 0 \
-    --verbose || true
-
-echo ""
-echo "Test completed. Check the output above to verify that subfolder models were detected."
-echo ""
-echo "Expected models to be found:"
-echo "- model1.safetensors"
-echo "- subfolder1/model2.safetensors"
-echo "- subfolder2/model3.safetensors"
-echo "- subfolder2/deep/model4.safetensors"
-echo "- subfolder2/nested/model5.safetensors"
-echo "- loras/lora1.safetensors"
-echo "- loras/subfolder/lora2.safetensors"
-
-# Cleanup
-rm -rf "$TEST_BASE"

+ 3 - 3
webui/app/img2img/page.tsx

@@ -58,8 +58,8 @@ export default function Img2ImgPage() {
           apiClient.getModels('lora'),
           apiClient.getModels('embedding'),
         ]);
-        setLoraModels(loras.map(m => m.name));
-        setEmbeddings(embeds.map(m => m.name));
+        setLoraModels(loras.models.map(m => m.name));
+        setEmbeddings(embeds.models.map(m => m.name));
       } catch (err) {
         console.error('Failed to load models:', err);
       }
@@ -139,7 +139,7 @@ export default function Img2ImgPage() {
           setLoading(false);
         } else if (attempts < maxAttempts) {
           attempts++;
-          setTimeout(poll, 1000);
+          setTimeout(poll, 2000);
         } else {
           setError('Job polling timeout');
           setLoading(false);

+ 4 - 4
webui/app/inpainting/page.tsx

@@ -59,8 +59,8 @@ export default function InpaintingPage() {
           apiClient.getModels('lora'),
           apiClient.getModels('embedding'),
         ]);
-        setLoraModels(loras.map(m => m.name));
-        setEmbeddings(embeds.map(m => m.name));
+        setLoraModels(loras.models.map(m => m.name));
+        setEmbeddings(embeds.models.map(m => m.name));
       } catch (err) {
         console.error('Failed to load models:', err);
       }
@@ -136,7 +136,7 @@ export default function InpaintingPage() {
           setLoading(false);
         } else if (attempts < maxAttempts) {
           attempts++;
-          setTimeout(poll, 1000);
+          setTimeout(poll, 2000);
         } else {
           setError('Job polling timeout');
           setLoading(false);
@@ -406,4 +406,4 @@ export default function InpaintingPage() {
       </div>
     </AppLayout>
   );
-}
+}

+ 13 - 6
webui/app/page.tsx

@@ -74,12 +74,19 @@ export default function HomePage() {
   };
 
   useEffect(() => {
-    // Only check health and system info if authenticated
-    if (isAuthenticated) {
-      checkHealth();
-      loadSystemInfo();
-    }
-  }, [isAuthenticated]);
+    // Check health and system info regardless of authentication status
+    checkHealth();
+    loadSystemInfo();
+
+    // Set up periodic health checks
+    const healthInterval = setInterval(checkHealth, 30000); // Check every 30 seconds
+    const systemInfoInterval = setInterval(loadSystemInfo, 60000); // Check every minute
+
+    return () => {
+      clearInterval(healthInterval);
+      clearInterval(systemInfoInterval);
+    };
+  }, []);
 
   // Show loading state while checking authentication
   if (isLoading) {

+ 4 - 4
webui/app/text2img/page.tsx

@@ -56,9 +56,9 @@ export default function Text2ImgPage() {
         ]);
         setSamplers(samplersData);
         setSchedulers(schedulersData);
-        setVaeModels(models);
-        setLoraModels(loras.map(m => m.name));
-        setEmbeddings(embeds.map(m => m.name));
+        setVaeModels(models.models);
+        setLoraModels(loras.models.map(m => m.name));
+        setEmbeddings(embeds.models.map(m => m.name));
       } catch (err) {
         console.error('Failed to load options:', err);
       }
@@ -124,7 +124,7 @@ export default function Text2ImgPage() {
           setLoading(false);
         } else if (attempts < maxAttempts) {
           attempts++;
-          setTimeout(poll, 1000);
+          setTimeout(poll, 2000);
         } else {
           setError('Job polling timeout');
           setLoading(false);

+ 2 - 2
webui/app/upscaler/page.tsx

@@ -46,7 +46,7 @@ export default function UpscalerPage() {
           apiClient.getModels('esrgan'),
           apiClient.getModels('upscaler'),
         ]);
-        const allModels = [...esrganModels, ...upscalerMods];
+        const allModels = [...esrganModels.models, ...upscalerMods.models];
         setUpscalerModels(allModels);
         // Set first model as default
         if (allModels.length > 0 && !formData.model) {
@@ -129,7 +129,7 @@ export default function UpscalerPage() {
           setLoading(false);
         } else if (attempts < maxAttempts) {
           attempts++;
-          setTimeout(poll, 1000);
+          setTimeout(poll, 2000);
         } else {
           setError('Job polling timeout');
           setLoading(false);

+ 80 - 31
webui/components/enhanced-queue-list.tsx

@@ -41,17 +41,17 @@ type ViewMode = 'compact' | 'detailed';
 // Debounce utility for frequent updates
 function useDebounce<T>(value: T, delay: number): T {
   const [debouncedValue, setDebouncedValue] = useState<T>(value);
-  
+
   useEffect(() => {
     const handler = setTimeout(() => {
       setDebouncedValue(value);
     }, delay);
-    
+
     return () => {
       clearTimeout(handler);
     };
   }, [value, delay]);
-  
+
   return debouncedValue;
 }
 
@@ -59,7 +59,7 @@ function useDebounce<T>(value: T, delay: number): T {
 function useThrottle<T>(value: T, delay: number): T {
   const [throttledValue, setThrottledValue] = useState<T>(value);
   const lastExecuted = useRef<number>(0);
-  
+
   useEffect(() => {
     const now = Date.now();
     if (now - lastExecuted.current >= delay) {
@@ -67,7 +67,7 @@ function useThrottle<T>(value: T, delay: number): T {
       lastExecuted.current = now;
     }
   }, [value, delay]);
-  
+
   return throttledValue;
 }
 
@@ -86,7 +86,7 @@ export function EnhancedQueueList({
 
   // Debounce the queue status to prevent excessive updates
   const debouncedQueueStatus = useDebounce(queueStatus, 100);
-  
+
   // Throttle progress updates to reduce rendering frequency
   const throttledJobs = useThrottle(debouncedQueueStatus?.jobs || [], 200);
 
@@ -157,7 +157,7 @@ export function EnhancedQueueList({
 
     // Sort within each status group and concatenate
     const result: JobInfo[] = [];
-    
+
     Object.entries(statusPriority)
       .sort(([,a], [,b]) => a - b)
       .forEach(([status]) => {
@@ -216,7 +216,7 @@ export function EnhancedQueueList({
   // Optimized duration formatting
   const formatDuration = useCallback((startTime: string, endTime?: string) => {
     if (!startTime) return 'Unknown';
-    
+
     const start = new Date(startTime).getTime();
     const end = endTime ? new Date(endTime).getTime() : Date.now();
     const duration = Math.max(0, Math.floor((end - start) / 1000));
@@ -252,27 +252,28 @@ export function EnhancedQueueList({
     }
   }, [getJobType]);
 
+  // Generate image URL from file path
+  const getImageUrl = useCallback((jobId: string, output: { url: string; path: string }) => {
+    // If we have a URL, use it directly. Otherwise construct from jobId and filename
+    if (output.url) {
+      return output.url;
+    }
+    const filename = output.path.split('/').pop();
+    return `/api/queue/job/${jobId}/output/${filename}`;
+  }, []);
+
   // Optimized parameter extraction
   const extractParameters = useCallback((job: JobInfo) => {
-    if (!job.message) return {};
-    
     const params: Record<string, any> = {};
     const message = job.message;
-    
-    // Use more efficient regex patterns
-    const promptMatch = message.match(/prompt[:\s]+([^\n]+)/i);
-    if (promptMatch) params.prompt = promptMatch[1].trim().substring(0, 100); // Limit length
-    
-    const stepsMatch = message.match(/steps[:\s]+(\d+)/i);
-    if (stepsMatch) params.steps = parseInt(stepsMatch[1], 10);
-    
-    const cfgMatch = message.match(/cfg[:\s]+([\d.]+)/i);
-    if (cfgMatch) params.cfg_scale = parseFloat(cfgMatch[1]);
-    
-    const sizeMatch = message.match(/(\d+)x(\d+)/);
-    if (sizeMatch) {
-      params.width = parseInt(sizeMatch[1], 10);
-      params.height = parseInt(sizeMatch[2], 10);
+
+    if (message) {
+      params.message = message.substring(0, 100); // Limit length
+    }
+
+    // Extract additional parameters from job result if available
+    if (job.result?.images?.length) {
+      params.images = job.result.images.length;
     }
 
     return params;
@@ -282,11 +283,11 @@ export function EnhancedQueueList({
   const copyParameters = useCallback((job: JobInfo) => {
     const params = extractParameters(job);
     if (Object.keys(params).length === 0) return;
-    
+
     const paramsText = Object.entries(params)
       .map(([key, value]) => `${key}: ${value}`)
       .join('\n');
-    
+
     // Use clipboard API with fallback
     if (navigator.clipboard?.writeText) {
       navigator.clipboard.writeText(paramsText).catch(() => {
@@ -299,7 +300,7 @@ export function EnhancedQueueList({
         document.body.removeChild(textArea);
       });
     }
-    
+
     onCopyParameters?.(job);
   }, [extractParameters, onCopyParameters]);
 
@@ -315,7 +316,7 @@ export function EnhancedQueueList({
   // Progress update optimization - only update progress if it changed significantly
   const ProgressBar = useCallback(({ job }: { job: JobInfo }) => {
     if (job.progress === undefined) return null;
-    
+
     const progressValue = job.progress * 100;
     return (
       <div className="space-y-2">
@@ -552,8 +553,56 @@ export function EnhancedQueueList({
                       </div>
                     )}
 
-                    {/* Results */}
-                    {job.status === 'completed' && job.result?.images && (
+                    {/* Results - Fixed to use outputs instead of result.images */}
+                    {job.status === 'completed' && job.outputs && job.outputs.length > 0 && (
+                      <div className="space-y-2">
+                        <h4 className="font-medium text-sm flex items-center gap-1">
+                          <Image className="h-4 w-4" />
+                          Generated Images ({job.outputs.length})
+                        </h4>
+                        <div className="grid grid-cols-2 md:grid-cols-4 gap-2">
+                          {job.outputs.map((output, index) => (
+                            <div key={index} className="relative group">
+                              <div className="aspect-square bg-muted rounded-lg overflow-hidden">
+                                <img
+                                  src={getImageUrl(jobId, output)}
+                                  alt={`Generated image ${index + 1}`}
+                                  className="w-full h-full object-cover"
+                                  loading="lazy"
+                                  onError={(e) => {
+                                    // Fallback to icon if image fails to load
+                                    const target = e.target as HTMLImageElement;
+                                    target.style.display = 'none';
+                                    const parent = target.parentElement;
+                                    if (parent) {
+                                      parent.innerHTML = '<div class="w-full h-full flex items-center justify-center text-muted-foreground"><svg class="h-8 w-8" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M4 16l4.586-4.586a2 2 0 012.828 0L16 16m-2-2l1.586-1.586a2 2 0 012.828 0L20 14m-6-6h.01M6 20h12a2 2 0 002-2V6a2 2 0 00-2-2H6a2 2 0 00-2 2v12a2 2 0 002 2z"></path></svg></div>';
+                                    }
+                                  }}
+                                />
+                              </div>
+                              <div className="absolute inset-0 bg-black/50 opacity-0 group-hover:opacity-100 transition-opacity rounded-lg flex items-center justify-center">
+                                <Button
+                                  variant="secondary"
+                                  size="sm"
+                                  onClick={() => {
+                                    const url = getImageUrl(jobId, output);
+                                    const link = document.createElement('a');
+                                    link.href = url;
+                                    link.download = output.filename || `generated-image-${index + 1}.png`;
+                                    link.click();
+                                  }}
+                                >
+                                  Download
+                                </Button>
+                              </div>
+                            </div>
+                          ))}
+                        </div>
+                      </div>
+                    )}
+
+                    {/* Results - Also support result.images for backwards compatibility */}
+                    {job.status === 'completed' && job.result?.images && job.result.images.length > 0 && (
                       <div className="space-y-2">
                         <h4 className="font-medium text-sm flex items-center gap-1">
                           <Image className="h-4 w-4" />

+ 67 - 10
webui/components/model-list.tsx

@@ -23,7 +23,10 @@ import {
   EyeOff,
   Filter,
   Grid3X3,
-  List
+  List,
+  ChevronLeft,
+  ChevronRight,
+  Image
 } from 'lucide-react';
 import { cn } from '@/lib/utils';
 
@@ -35,6 +38,18 @@ interface ModelListProps {
   onUnloadModel: (modelId: string) => void;
   onConvertModel?: (modelName: string, quantizationType: string) => void;
   actionLoading: string | null;
+  pagination?: {
+    page: number;
+    limit: number;
+    total_count: number;
+    total_pages: number;
+    has_next: boolean;
+    has_prev: boolean;
+  };
+  statistics?: any;
+  searchTerm?: string;
+  onSearchChange?: (term: string) => void;
+  onLoadPage?: (page: number) => void;
 }
 
 type ViewMode = 'grid' | 'list';
@@ -46,13 +61,22 @@ export function ModelList({
   onLoadModel,
   onUnloadModel,
   onConvertModel,
-  actionLoading
+  actionLoading,
+  pagination,
+  statistics,
+  searchTerm: externalSearchTerm = '',
+  onSearchChange,
+  onLoadPage
 }: ModelListProps) {
-  const [searchTerm, setSearchTerm] = useState('');
+  const [internalSearchTerm, setInternalSearchTerm] = useState('');
   const [selectedType, setSelectedType] = useState<string>('all');
   const [showFullPaths, setShowFullPaths] = useState(false);
   const [viewMode, setViewMode] = useState<ViewMode>('grid');
 
+  // Use external search term if provided, otherwise use internal state
+  const currentSearchTerm = externalSearchTerm || internalSearchTerm;
+  const setSearchTerm = onSearchChange || setInternalSearchTerm;
+
   // Calculate model type statistics
   const modelStats = useMemo(() => {
     const stats = models.reduce((acc, model) => {
@@ -71,12 +95,12 @@ export function ModelList({
   // Filter models
   const filteredModels = useMemo(() => {
     return models.filter(model => {
-      const matchesSearch = model.name.toLowerCase().includes(searchTerm.toLowerCase()) ||
-                           (model.sha256_short && model.sha256_short.toLowerCase().includes(searchTerm.toLowerCase()));
+      const matchesSearch = model.name.toLowerCase().includes(currentSearchTerm.toLowerCase()) ||
+                           (model.sha256_short && model.sha256_short.toLowerCase().includes(currentSearchTerm.toLowerCase()));
       const matchesType = selectedType === 'all' || model.type === selectedType;
       return matchesSearch && matchesType;
     });
-  }, [models, searchTerm, selectedType]);
+  }, [models, currentSearchTerm, selectedType]);
 
   // Get display name (compact vs full path)
   const getDisplayName = (model: ModelInfo) => {
@@ -152,7 +176,7 @@ export function ModelList({
                 <Input
                   id="search"
                   placeholder="Search by name or hash..."
-                  value={searchTerm}
+                  value={currentSearchTerm}
                   onChange={(e) => setSearchTerm(e.target.value)}
                   className="pl-10"
                 />
@@ -388,6 +412,42 @@ export function ModelList({
         </div>
       )}
 
+      {/* Pagination */}
+      {pagination && pagination.total_pages > 1 && (
+        <Card>
+          <CardContent className="flex items-center justify-between p-4">
+            <div className="text-sm text-muted-foreground">
+              Showing {((pagination.page - 1) * pagination.limit) + 1} to{' '}
+              {Math.min(pagination.page * pagination.limit, pagination.total_count)} of{' '}
+              {pagination.total_count} models
+            </div>
+            <div className="flex items-center gap-2">
+              <Button
+                variant="outline"
+                size="sm"
+                onClick={() => onLoadPage && onLoadPage(pagination.page - 1)}
+                disabled={!pagination.has_prev}
+              >
+                <ChevronLeft className="h-4 w-4" />
+                Previous
+              </Button>
+              <div className="text-sm">
+                Page {pagination.page} of {pagination.total_pages}
+              </div>
+              <Button
+                variant="outline"
+                size="sm"
+                onClick={() => onLoadPage && onLoadPage(pagination.page + 1)}
+                disabled={!pagination.has_next}
+              >
+                Next
+                <ChevronRight className="h-4 w-4" />
+              </Button>
+            </div>
+          </CardContent>
+        </Card>
+      )}
+
       {filteredModels.length === 0 && (
         <Card>
           <CardContent className="text-center py-12">
@@ -398,6 +458,3 @@ export function ModelList({
     </div>
   );
 }
-
-// Import Image icon that was missing
-import { Image } from 'lucide-react';

+ 45 - 4
webui/components/model-status-bar.tsx

@@ -2,7 +2,7 @@
 
 import { useState, useEffect } from 'react';
 import { apiClient, type ModelInfo, type QueueStatus, type JobInfo } from '@/lib/api';
-import { AlertCircle, CheckCircle2, Loader2, Activity } from 'lucide-react';
+import { AlertCircle, CheckCircle2, Loader2, Activity, Image } from 'lucide-react';
 import { cn } from '@/lib/utils';
 
 export function ModelStatusBar() {
@@ -10,6 +10,7 @@ export function ModelStatusBar() {
   const [loading, setLoading] = useState(true);
   const [queueStatus, setQueueStatus] = useState<QueueStatus | null>(null);
   const [activeJob, setActiveJob] = useState<JobInfo | null>(null);
+  const [recentlyCompleted, setRecentlyCompleted] = useState<JobInfo[]>([]);
 
   useEffect(() => {
     const checkStatus = async () => {
@@ -19,7 +20,7 @@ export function ModelStatusBar() {
           apiClient.getQueueStatus(),
         ]);
 
-        setLoadedModel(loadedModels.length > 0 ? loadedModels[0] : null);
+        setLoadedModel(loadedModels.models.length > 0 ? loadedModels.models[0] : null);
         setQueueStatus(queue);
 
         // Find active/processing job
@@ -27,6 +28,18 @@ export function ModelStatusBar() {
           (job) => job.status === 'processing' || job.status === 'queued'
         );
         setActiveJob(processing || null);
+
+        // Keep track of recently completed jobs (last 30 seconds)
+        const now = Date.now();
+        const thirtySecondsAgo = now - 30000;
+
+        // Update recently completed jobs
+        const completedJobs = queue.jobs.filter(
+          (job) => job.status === 'completed' &&
+                   job.updated_at &&
+                   new Date(job.updated_at).getTime() > thirtySecondsAgo
+        );
+        setRecentlyCompleted(completedJobs);
       } catch (error) {
         console.error('Failed to check status:', error);
       } finally {
@@ -36,8 +49,8 @@ export function ModelStatusBar() {
 
     checkStatus();
 
-    // Poll every 1 second when there's an active job, otherwise every 5 seconds
-    const pollInterval = activeJob ? 1000 : 5000;
+    // Poll every 2 seconds when there's an active job, otherwise every 5 seconds
+    const pollInterval = activeJob ? 2000 : 5000;
     const interval = setInterval(checkStatus, pollInterval);
 
     return () => clearInterval(interval);
@@ -92,6 +105,34 @@ export function ModelStatusBar() {
         )}
       </>
     );
+  } else if (recentlyCompleted.length > 0) {
+    // Show recently completed jobs with their results
+    const latestCompleted = recentlyCompleted[0];
+    const hasOutputs = (latestCompleted.outputs?.length ?? 0) > 0 || (latestCompleted.result?.images?.length ?? 0) > 0;
+
+    statusBg = 'bg-green-600 dark:bg-green-700';
+    statusBorder = 'border-green-500 dark:border-green-600';
+    statusText = 'text-white';
+    icon = hasOutputs ? <Image className="h-4 w-4 flex-shrink-0" /> : <CheckCircle2 className="h-4 w-4 flex-shrink-0" />;
+
+    const outputCount = (latestCompleted.outputs?.length ?? 0) + (latestCompleted.result?.images?.length ?? 0);
+    content = (
+      <>
+        <span className="font-semibold">Completed:</span>
+        <span className="truncate">{latestCompleted.id}</span>
+        {hasOutputs && (
+          <>
+            <span className="text-sm">• Generated {outputCount} image{outputCount !== 1 ? 's' : ''}</span>
+            <div className="flex items-center gap-2 ml-auto">
+              <div className="w-40 h-2.5 bg-green-900/50 dark:bg-green-950/50 rounded-full overflow-hidden border border-green-400/30">
+                <div className="h-full bg-green-200 dark:bg-green-300" style={{ width: '100%' }} />
+              </div>
+              <span className="text-sm font-semibold min-w-[3rem] text-right">100%</span>
+            </div>
+          </>
+        )}
+      </>
+    );
   } else if (loadedModel) {
     // Model loaded, ready
     statusBg = 'bg-green-600 dark:bg-green-700';

+ 72 - 29
webui/lib/api.ts

@@ -40,10 +40,10 @@ class RequestThrottler {
   getWaitTime(key: string): number {
     const request = this.requests.get(key);
     if (!request) return 0;
-    
+
     const now = Date.now();
     if (now >= request.resetTime) return 0;
-    
+
     return request.resetTime - now;
   }
 }
@@ -58,18 +58,18 @@ function debounce<T extends (...args: any[]) => any>(
   immediate?: boolean
 ): (...args: Parameters<T>) => void {
   let timeout: NodeJS.Timeout | null = null;
-  
+
   return function executedFunction(...args: Parameters<T>) {
     const later = () => {
       timeout = null;
       if (!immediate) func(...args);
     };
-    
+
     const callNow = immediate && !timeout;
-    
+
     if (timeout) clearTimeout(timeout);
     timeout = setTimeout(later, wait);
-    
+
     if (callNow) func(...args);
   };
 }
@@ -295,7 +295,7 @@ class ApiClient {
     }
 
     const endpoints = ['/queue/status', '/health', '/status', '/'];
-    
+
     for (const endpoint of endpoints) {
       try {
         const response = await fetch(`${this.getBaseUrl()}${endpoint}`, {
@@ -309,7 +309,7 @@ class ApiClient {
 
         if (response.ok) {
           const data = await response.json();
-          
+
           // For queue status, consider it healthy if it returns valid structure
           if (endpoint === '/queue/status' && data.queue) {
             const result = {
@@ -320,7 +320,7 @@ class ApiClient {
             cache.set(cacheKey, result, 10000); // Cache for 10 seconds
             return result;
           }
-          
+
           // For other health endpoints
           const healthStatus: HealthStatus = {
             status: 'ok',
@@ -424,14 +424,14 @@ class ApiClient {
     }
 
     const result = await this.request<JobInfo>(`/queue/job/${jobId}`);
-    
+
     // Cache job status for a short time
     if (result.status === 'processing' || result.status === 'queued') {
       cache.set(cacheKey, result, 2000); // Cache for 2 seconds for active jobs
     } else {
       cache.set(cacheKey, result, 10000); // Cache for 10 seconds for completed jobs
     }
-    
+
     return result;
   }
 
@@ -494,7 +494,7 @@ class ApiClient {
   async cancelJob(jobId: string): Promise<void> {
     // Clear job status cache when cancelling
     cache.delete(`job_status_${jobId}`);
-    
+
     return this.request<void>('/queue/cancel', {
       method: 'POST',
       body: JSON.stringify({ job_id: jobId }),
@@ -510,31 +510,31 @@ class ApiClient {
     }
 
     const response = await this.request<{ queue: QueueStatus }>('/queue/status');
-    
+
     // Cache queue status based on current activity
-    const hasActiveJobs = response.queue.jobs.some(job => 
+    const hasActiveJobs = response.queue.jobs.some(job =>
       job.status === 'processing' || job.status === 'queued'
     );
-    
+
     // Cache for shorter time if there are active jobs
     const cacheTime = hasActiveJobs ? 1000 : 5000; // 1 second for active, 5 seconds for idle
     cache.set(cacheKey, response.queue, cacheTime);
-    
+
     return response.queue;
   }
 
   async clearQueue(): Promise<void> {
     // Clear all related caches
     cache.delete('queue_status');
-    
+
     return this.request<void>('/queue/clear', {
       method: 'POST',
     });
   }
 
   // Model management
-  async getModels(type?: string, loaded?: boolean): Promise<ModelInfo[]> {
-    const cacheKey = `models_${type || 'all'}_${loaded ? 'loaded' : 'all'}`;
+  async getModels(type?: string, loaded?: boolean, page: number = 1, limit: number = 50, search?: string): Promise<{ models: ModelInfo[]; pagination: any; statistics: any }> {
+    const cacheKey = `models_${type || 'all'}_${loaded ? 'loaded' : 'all'}_${page}_${limit}_${search || 'all'}`;
     const cachedResult = cache.get(cacheKey);
     if (cachedResult) {
       return cachedResult;
@@ -544,22 +544,65 @@ class ApiClient {
     const params = [];
     if (type && type !== 'loaded') params.push(`type=${type}`);
     if (type === 'loaded' || loaded) params.push('loaded=true');
-    // Request a high limit to get all models (default is 50)
-    params.push('limit=1000');
+    params.push(`page=${page}`);
+    params.push(`limit=${limit}`);
+    if (search) params.push(`search=${encodeURIComponent(search)}`);
+
+    // Add include_metadata for additional information
+    params.push('include_metadata=true');
+
     if (params.length > 0) endpoint += '?' + params.join('&');
 
-    const response = await this.request<{ models: ModelInfo[] }>(endpoint);
+    const response = await this.request<{
+      models: ModelInfo[];
+      pagination: {
+        page: number;
+        limit: number;
+        total_count: number;
+        total_pages: number;
+        has_next: boolean;
+        has_prev: boolean
+      };
+      statistics: any;
+    }>(endpoint);
+
     const models = response.models.map(model => ({
       ...model,
       id: model.sha256_short || model.name,
       size: model.file_size || model.size,
       path: model.path || model.name,
     }));
-    
+
+    const result = {
+      models,
+      pagination: response.pagination,
+      statistics: response.statistics || {}
+    };
+
     // Cache models for 30 seconds as they don't change frequently
-    cache.set(cacheKey, models, 30000);
-    
-    return models;
+    cache.set(cacheKey, result, 30000);
+
+    return result;
+  }
+
+  // Get all models (for backward compatibility)
+  async getAllModels(type?: string, loaded?: boolean): Promise<ModelInfo[]> {
+    const allModels: ModelInfo[] = [];
+    let page = 1;
+    const limit = 100;
+
+    while (true) {
+      const response = await this.getModels(type, loaded, page, limit);
+      allModels.push(...response.models);
+
+      if (!response.pagination.has_next) {
+        break;
+      }
+
+      page++;
+    }
+
+    return allModels;
   }
 
   async getModelInfo(modelId: string): Promise<ModelInfo> {
@@ -577,7 +620,7 @@ class ApiClient {
   async loadModel(modelId: string): Promise<void> {
     // Clear model cache when loading
     cache.delete(`model_info_${modelId}`);
-    
+
     return this.request<void>(`/models/${modelId}/load`, {
       method: 'POST',
     });
@@ -586,7 +629,7 @@ class ApiClient {
   async unloadModel(modelId: string): Promise<void> {
     // Clear model cache when unloading
     cache.delete(`model_info_${modelId}`);
-    
+
     return this.request<void>(`/models/${modelId}/unload`, {
       method: 'POST',
     });
@@ -595,7 +638,7 @@ class ApiClient {
   async scanModels(): Promise<void> {
     // Clear all model caches when scanning
     cache.clear();
-    
+
     return this.request<void>('/models/refresh', {
       method: 'POST',
     });