// generation_queue.cpp

  1. #include "generation_queue.h"
  2. #include "model_manager.h"
  3. #include "stable_diffusion_wrapper.h"
  4. #include "utils.h"
  5. #include "logger.h"
  6. #include <iostream>
  7. #include <random>
  8. #include <sstream>
  9. #include <iomanip>
  10. #include <algorithm>
  11. #include <fstream>
  12. #include <filesystem>
  13. #include <nlohmann/json.hpp>
  14. #include <vector>
  15. #define STB_IMAGE_WRITE_IMPLEMENTATION
  16. #include "../stable-diffusion.cpp-src/thirdparty/stb_image_write.h"
  17. #define STB_IMAGE_IMPLEMENTATION
  18. #include "../stable-diffusion.cpp-src/thirdparty/stb_image.h"
class GenerationQueue::Impl {
public:
    // Model manager reference
    ModelManager* modelManager = nullptr;
    // Thread management
    std::thread workerThread;
    std::atomic<bool> running{false};
    std::atomic<bool> stopRequested{false};
    // Queue management
    mutable std::mutex queueMutex;
    std::condition_variable queueCondition;
    std::queue<GenerationRequest> requestQueue;
    // Job tracking
    mutable std::mutex jobsMutex;
    std::unordered_map<std::string, JobInfo> activeJobs;
    std::unordered_map<std::string, std::promise<GenerationResult>> jobPromises;
    // Hash job tracking
    std::map<std::string, std::shared_ptr<std::promise<HashResult>>> hashPromises;
    std::map<std::string, HashRequest> hashRequests;
    // Conversion job tracking
    std::map<std::string, std::shared_ptr<std::promise<ConversionResult>>> conversionPromises;
    std::map<std::string, ConversionRequest> conversionRequests;
    // Configuration
    int maxConcurrentGenerations = 1;
    std::string queueDir = "./queue";
    std::string outputDir = "./output";
    // Statistics
    std::atomic<size_t> queueSize{0};
    std::atomic<size_t> activeGenerations{0};
    std::atomic<uint64_t> totalJobsProcessed{0};
    // Worker thread function
    void workerThreadFunction() {
        LOG_INFO("GenerationQueue worker thread started");
        while (running.load() && !stopRequested.load()) {
            std::unique_lock<std::mutex> lock(queueMutex);
            // Wait for a request or stop signal
            queueCondition.wait(lock, [this] {
                return !requestQueue.empty() || stopRequested.load();
            });
            if (stopRequested.load()) {
                break;
            }
            if (requestQueue.empty()) {
                continue;
            }
            // Get the next request
            GenerationRequest request = requestQueue.front();
            requestQueue.pop();
            queueSize.store(requestQueue.size());
            lock.unlock();
            // Process the request
            processRequest(request);
        }
        LOG_INFO("GenerationQueue worker thread stopped");
    }
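    // Dispatches a dequeued request. Hash and conversion jobs travel through the same
    // queue as ordinary generations, flagged by the sentinel prompts "HASH_JOB" and
    // "CONVERSION_JOB"; everything else is treated as an image-generation request.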
    void processRequest(const GenerationRequest& request) {
        // Check if this is a hash job
        if (request.prompt == "HASH_JOB") {
            auto hashIt = hashRequests.find(request.id);
            if (hashIt != hashRequests.end()) {
                HashResult result = performHashJob(hashIt->second);
                auto promiseIt = hashPromises.find(request.id);
                if (promiseIt != hashPromises.end()) {
                    promiseIt->second->set_value(result);
                    hashPromises.erase(promiseIt);
                }
                hashRequests.erase(hashIt);
            }
            return;
        }
        // Check if this is a conversion job
        if (request.prompt == "CONVERSION_JOB") {
            auto convIt = conversionRequests.find(request.id);
            if (convIt != conversionRequests.end()) {
                ConversionResult result = performConversionJob(convIt->second);
                auto promiseIt = conversionPromises.find(request.id);
                if (promiseIt != conversionPromises.end()) {
                    promiseIt->second->set_value(result);
                    conversionPromises.erase(promiseIt);
                }
                conversionRequests.erase(convIt);
            }
            return;
        }
        auto startTime = std::chrono::system_clock::now();
        // Update job status to PROCESSING (only if not already in PROCESSING from model loading)
        {
            std::lock_guard<std::mutex> lock(jobsMutex);
            if (activeJobs.find(request.id) != activeJobs.end()) {
                // Only change status if it was QUEUED (not MODEL_LOADING)
                if (activeJobs[request.id].status == GenerationStatus::QUEUED) {
                    activeJobs[request.id].status = GenerationStatus::PROCESSING;
                    activeJobs[request.id].progress = 0.0f;
                    activeJobs[request.id].modelLoadProgress = 0.0f;
                    activeJobs[request.id].generationProgress = 0.0f;
                    activeJobs[request.id].firstGenerationCallback = true; // Initialize first callback flag
                } else if (activeJobs[request.id].status == GenerationStatus::MODEL_LOADING) {
                    // Transition from MODEL_LOADING to PROCESSING
                    activeJobs[request.id].status = GenerationStatus::PROCESSING;
                    // Model loading should already be complete at this point
                    activeJobs[request.id].modelLoadProgress = 1.0f;
                    activeJobs[request.id].firstGenerationCallback = true; // Initialize first callback flag
                }
                activeJobs[request.id].startTime = startTime;
                activeJobs[request.id].currentStep = 0;
                activeJobs[request.id].totalSteps = 0;
                // timeElapsed, timeRemaining and speed keep their current values here;
                // the progress callbacks update them once generation starts.
                saveJobToFile(activeJobs[request.id]);
            }
        }
        activeGenerations.store(1); // Only one generation at a time
        LOG_INFO("Processing generation request: " + request.id +
                 " (prompt: " + request.prompt.substr(0, 50) +
                 (request.prompt.length() > 50 ? "..." : "") + ")");
        // Real generation logic using stable-diffusion.cpp with progress tracking
        GenerationResult result = performActualGeneration(request, request.id);
        auto endTime = std::chrono::system_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(endTime - startTime);
        result.generationTime = duration.count();
        // Update job status to COMPLETED/FAILED
        {
            std::lock_guard<std::mutex> lock(jobsMutex);
            if (activeJobs.find(request.id) != activeJobs.end()) {
                float finalProgress = activeJobs[request.id].progress;
                auto completionTime = std::chrono::system_clock::now();
                auto totalGenerationTime = std::chrono::duration_cast<std::chrono::milliseconds>(completionTime - startTime).count();
                LOG_DEBUG("[TIMING_ANALYSIS] Job " + request.id +
                          " - Total generation time: " + std::to_string(totalGenerationTime) + "ms");
                LOG_DEBUG("[JOB_COMPLETION] Job " + request.id +
                          " - Status changing to '" + (result.success ? "completed" : "failed") + "'" +
                          " - Progress at completion: " + std::to_string(static_cast<int>(finalProgress * 100.0f)) + "%");
                activeJobs[request.id].status = result.success ? GenerationStatus::COMPLETED : GenerationStatus::FAILED;
                activeJobs[request.id].endTime = endTime;
                // Set final progress to 100% if successful
                if (result.success) {
                    activeJobs[request.id].progress = 1.0f;
                    if (activeJobs[request.id].totalSteps > 0) {
                        activeJobs[request.id].currentStep = activeJobs[request.id].totalSteps;
                    }
                }
                // Store output files, error message, and generation results
                activeJobs[request.id].outputFiles = result.imagePaths;
                activeJobs[request.id].errorMessage = result.errorMessage;
                activeJobs[request.id].actualSeed = result.actualSeed;
                activeJobs[request.id].generationTime = result.generationTime;
                {
                    std::ostringstream oss;
                    oss << "Job " << request.id << " completed. Success: " << (result.success ? "true" : "false")
                        << ", Image paths: " << result.imagePaths.size();
                    LOG_DEBUG(oss.str());
                }
                for (size_t i = 0; i < result.imagePaths.size(); ++i) {
                    std::ostringstream oss2;
                    oss2 << "Image " << i << ": " << result.imagePaths[i];
                    LOG_DEBUG(oss2.str());
                }
                if (!result.errorMessage.empty()) {
                    std::ostringstream err_oss;
                    err_oss << "Error message: " << result.errorMessage;
                    LOG_DEBUG(err_oss.str());
                }
                // Persist to disk
                saveJobToFile(activeJobs[request.id]);
            }
            // Set the promise value
            auto it = jobPromises.find(request.id);
            if (it != jobPromises.end()) {
                it->second.set_value(result);
                jobPromises.erase(it);
            }
        }
        activeGenerations.store(0);
        totalJobsProcessed.fetch_add(1);
        std::string completionMsg = "Completed generation request: " + request.id +
                                    " (success: " + (result.success ? "true" : "false") +
                                    ", time: " + std::to_string(result.generationTime) + "ms)";
        if (!result.success && !result.errorMessage.empty()) {
            completionMsg += " - Error: " + result.errorMessage;
        }
        LOG_INFO(completionMsg);
    }
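    // While a model is loading, overall progress mirrors the loader's progress and the
    // job's prompt temporarily gets a " [Loading model: N%]" suffix (stripped again once
    // generation starts), presumably so status consumers can surface the loading state.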
    // Progress callback that updates model loading progress
    void updateModelLoadProgress(const std::string& jobId, float modelProgress, uint64_t timeElapsed) {
        std::lock_guard<std::mutex> lock(jobsMutex);
        auto it = activeJobs.find(jobId);
        if (it != activeJobs.end()) {
            // Clamp model loading progress to valid range [0.0, 1.0]
            modelProgress = std::max(0.0f, std::min(1.0f, modelProgress));
            it->second.modelLoadProgress = modelProgress;
            it->second.generationProgress = 0.0f;
            // Overall progress during model loading is just the model loading progress
            it->second.progress = modelProgress;
            it->second.timeElapsed = static_cast<int64_t>(timeElapsed);
            // Update status message to reflect model loading
            if (!it->second.prompt.empty()) {
                size_t bracketPos = it->second.prompt.find(" [Loading model:");
                if (bracketPos == std::string::npos) {
                    it->second.prompt = it->second.prompt + " [Loading model: " + std::to_string(static_cast<int>(modelProgress * 100)) + "%]";
                } else {
                    // Update existing loading message
                    it->second.prompt = it->second.prompt.substr(0, bracketPos) + " [Loading model: " + std::to_string(static_cast<int>(modelProgress * 100)) + "%]";
                }
            }
            saveJobToFile(it->second);
        }
    }
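    // Overall progress is split 50/50: the first half covers model loading, the second
    // half the denoising steps. The first generation callback is pinned to exactly 50%
    // (firstGenerationCallback), presumably so the bar does not jump when sampling begins.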
    // Progress callback that updates generation progress
    void updateGenerationProgress(const std::string& jobId, int step, int totalSteps, float genProgress, uint64_t timeElapsed) {
        std::lock_guard<std::mutex> lock(jobsMutex);
        auto it = activeJobs.find(jobId);
        if (it != activeJobs.end()) {
            // Clamp generation progress to valid range [0.0, 1.0]
            genProgress = std::max(0.0f, std::min(1.0f, genProgress));
            it->second.generationProgress = genProgress;
            it->second.currentStep = step;
            it->second.totalSteps = totalSteps;
            it->second.timeElapsed = static_cast<int64_t>(timeElapsed);
            // Handle first generation callback specially
            if (it->second.firstGenerationCallback) {
                // This is the first callback, ignore the incoming genProgress value
                // and set overall progress to exactly 50% (model loading complete)
                it->second.generationProgress = 0.0f;
                it->second.progress = 0.5f;
                it->second.firstGenerationCallback = false; // Mark that we've handled the first callback
                LOG_DEBUG("First generation callback for job " + jobId +
                          " - Ignored genProgress (" + std::to_string(genProgress) +
                          "), reset generation progress to 0.0 and overall progress to 50%");
            } else {
                // For subsequent callbacks, calculate overall progress normally: 50% for model loading + 50% for generation
                it->second.progress = 0.5f + (genProgress * 0.5f);
            }
            // Clamp overall progress to valid range [0.0, 1.0]
            it->second.progress = std::max(0.0f, std::min(1.0f, it->second.progress));
            // Calculate time remaining and speed
            if (step > 0 && timeElapsed > 0) {
                double avgStepTime = static_cast<double>(timeElapsed) / step;
                int remainingSteps = totalSteps - step;
                it->second.timeRemaining = static_cast<int64_t>(avgStepTime * remainingSteps);
                it->second.speed = 1000.0 / avgStepTime; // steps per second
            }
            // Clean up loading message from prompt
            if (!it->second.prompt.empty()) {
                size_t bracketPos = it->second.prompt.find(" [Loading model:");
                if (bracketPos != std::string::npos) {
                    it->second.prompt = it->second.prompt.substr(0, bracketPos);
                }
            }
            // Save progress to file periodically (every 10 steps or on significant progress changes)
            if (step % 10 == 0 || genProgress >= 0.99f) {
                saveJobToFile(it->second);
            }
        }
    }
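    // Runs one generation end to end: loads the model on demand (reporting load progress),
    // maps the request onto StableDiffusionWrapper::GenerationParams, dispatches on the
    // request type (text2img / img2img / controlnet / upscaler / inpainting), and finally
    // writes the generated images to disk via saveImageToFile().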
  282. GenerationResult performActualGeneration(const GenerationRequest& request, const std::string& jobId) {
  283. GenerationResult result;
  284. result.requestId = request.id;
  285. result.success = false;
  286. // Check if model manager is available
  287. if (!modelManager) {
  288. result.errorMessage = "Model manager not available";
  289. return result;
  290. }
  291. if (!modelManager->isModelLoaded(request.modelName)) {
  292. // Update status to show model is being loaded
  293. {
  294. std::lock_guard<std::mutex> lock(jobsMutex);
  295. if (activeJobs.find(request.id) != activeJobs.end()) {
  296. activeJobs[request.id].status = GenerationStatus::MODEL_LOADING;
  297. activeJobs[request.id].progress = 0.0f; // Start at 0% for model loading
  298. saveJobToFile(activeJobs[request.id]);
  299. }
  300. }
  301. // Auto-load the model with progress tracking
  302. auto modelLoadStartTime = std::chrono::system_clock::now();
  303. // Create a progress callback for model loading
  304. auto modelLoadProgressCallback = [this, jobId = request.id, modelLoadStartTime](float loadProgress) {
  305. auto currentTime = std::chrono::system_clock::now();
  306. uint64_t timeElapsed = std::chrono::duration_cast<std::chrono::milliseconds>(currentTime - modelLoadStartTime).count();
  307. updateModelLoadProgress(jobId, loadProgress, timeElapsed);
  308. };
  309. bool loadSuccess = modelManager->loadModel(request.modelName, modelLoadProgressCallback);
  310. if (!loadSuccess || !modelManager->isModelLoaded(request.modelName)) {
  311. result.errorMessage = "Failed to load model: " + request.modelName;
  312. // Update job status to failed
  313. {
  314. std::lock_guard<std::mutex> lock(jobsMutex);
  315. if (activeJobs.find(request.id) != activeJobs.end()) {
  316. activeJobs[request.id].status = GenerationStatus::FAILED;
  317. activeJobs[request.id].errorMessage = result.errorMessage;
  318. saveJobToFile(activeJobs[request.id]);
  319. }
  320. }
  321. return result;
  322. }
  323. // Reset status after successful model loading
  324. {
  325. std::lock_guard<std::mutex> lock(jobsMutex);
  326. if (activeJobs.find(request.id) != activeJobs.end()) {
  327. // Remove the loading progress suffix from prompt
  328. size_t bracketPos = activeJobs[request.id].prompt.find(" [Loading model:");
  329. if (bracketPos != std::string::npos) {
  330. activeJobs[request.id].prompt = activeJobs[request.id].prompt.substr(0, bracketPos);
  331. }
  332. // Set model loading to complete, overall progress to exactly 50% (halfway through)
  333. activeJobs[request.id].modelLoadProgress = 1.0f;
  334. activeJobs[request.id].generationProgress = 0.0f;
  335. activeJobs[request.id].progress = 0.5f;
  336. activeJobs[request.id].firstGenerationCallback = true; // Initialize first callback flag
  337. saveJobToFile(activeJobs[request.id]);
  338. }
  339. }
  340. }
  341. // Get the model wrapper from the shared model manager
  342. auto* modelWrapper = modelManager->getModel(request.modelName);
  343. if (!modelWrapper) {
  344. result.errorMessage = "Model not found or not loaded: " + request.modelName;
  345. return result;
  346. }
  347. // Prepare generation parameters
  348. StableDiffusionWrapper::GenerationParams params;
  349. params.prompt = request.prompt;
  350. params.negativePrompt = request.negativePrompt;
  351. params.width = request.width;
  352. params.height = request.height;
  353. params.batchCount = request.batchCount;
  354. params.steps = request.steps;
  355. params.cfgScale = request.cfgScale;
  356. params.samplingMethod = samplingMethodToString(request.samplingMethod);
  357. params.scheduler = schedulerToString(request.scheduler);
  358. params.clipSkip = request.clipSkip;
  359. params.strength = request.strength;
  360. params.controlStrength = request.controlStrength;
  361. params.nThreads = request.nThreads;
  362. params.offloadParamsToCpu = request.offloadParamsToCpu;
  363. params.clipOnCpu = request.clipOnCpu;
  364. params.vaeOnCpu = request.vaeOnCpu;
  365. params.diffusionFlashAttn = request.diffusionFlashAttn;
  366. params.diffusionConvDirect = request.diffusionConvDirect;
  367. params.vaeConvDirect = request.vaeConvDirect;
  368. // Set model paths if provided
  369. params.modelPath = modelManager->getModelInfo(request.modelName).path;
  370. params.clipLPath = request.clipLPath;
  371. params.clipGPath = request.clipGPath;
  372. params.vaePath = request.vaePath;
  373. params.taesdPath = request.taesdPath;
  374. params.controlNetPath = request.controlNetPath;
  375. params.embeddingDir = request.embeddingDir;
  376. params.loraModelDir = request.loraModelDir;
  377. // Parse seed
  378. if (request.seed == "random") {
  379. std::random_device rd;
  380. std::mt19937 gen(rd());
  381. std::uniform_int_distribution<int64_t> dis;
  382. params.seed = dis(gen);
  383. } else {
  384. try {
  385. params.seed = std::stoll(request.seed);
  386. } catch (...) {
  387. params.seed = 42; // Default seed
  388. }
  389. }
  390. result.actualSeed = params.seed;
  391. // Generate images based on request type with progress tracking
  392. try {
  393. std::vector<StableDiffusionWrapper::GeneratedImage> generatedImages;
  394. // Create progress callback that updates job info
  395. auto progressCallback = [this, jobId](int step, int totalSteps, float progress, void* userData) {
  396. // Calculate time elapsed from start time (stored in userData)
  397. auto startTime = userData ? *static_cast<std::chrono::system_clock::time_point*>(userData) : std::chrono::system_clock::now();
  398. auto currentTime = std::chrono::system_clock::now();
  399. uint64_t timeElapsed = std::chrono::duration_cast<std::chrono::milliseconds>(currentTime - startTime).count();
  400. // Use the new generation progress update function
  401. updateGenerationProgress(jobId, step, totalSteps, progress, timeElapsed);
  402. };
  403. // Store start time to pass as user data
  404. auto generationStartTime = std::chrono::system_clock::now();
  405. switch (request.requestType) {
  406. case GenerationRequest::RequestType::TEXT2IMG:
  407. generatedImages = modelWrapper->generateImage(params, progressCallback, &generationStartTime);
  408. break;
  409. case GenerationRequest::RequestType::IMG2IMG:
  410. if (request.initImageData.empty()) {
  411. result.errorMessage = "No init image data provided for img2img";
  412. return result;
  413. }
  414. generatedImages = modelWrapper->generateImageImg2Img(
  415. params,
  416. request.initImageData,
  417. request.initImageWidth,
  418. request.initImageHeight,
  419. progressCallback,
  420. &generationStartTime
  421. );
  422. break;
  423. case GenerationRequest::RequestType::CONTROLNET:
  424. if (request.controlImageData.empty()) {
  425. result.errorMessage = "No control image data provided for ControlNet";
  426. return result;
  427. }
  428. generatedImages = modelWrapper->generateImageControlNet(
  429. params,
  430. request.controlImageData,
  431. request.controlImageWidth,
  432. request.controlImageHeight,
  433. progressCallback,
  434. &generationStartTime
  435. );
  436. break;
  437. case GenerationRequest::RequestType::UPSCALER:
  438. if (request.initImageData.empty()) {
  439. result.errorMessage = "No input image data provided for upscaling";
  440. return result;
  441. }
  442. if (request.esrganPath.empty()) {
  443. result.errorMessage = "No ESRGAN model path provided for upscaling";
  444. return result;
  445. }
  446. {
  447. auto upscaledImage = modelWrapper->upscaleImage(
  448. request.esrganPath,
  449. request.initImageData,
  450. request.initImageWidth,
  451. request.initImageHeight,
  452. request.initImageChannels,
  453. request.upscaleFactor,
  454. request.nThreads,
  455. request.offloadParamsToCpu,
  456. request.diffusionConvDirect
  457. );
  458. generatedImages.push_back(upscaledImage);
  459. }
  460. break;
  461. case GenerationRequest::RequestType::INPAINTING:
  462. if (request.initImageData.empty()) {
  463. result.errorMessage = "No source image data provided for inpainting";
  464. return result;
  465. }
  466. if (request.maskImageData.empty()) {
  467. result.errorMessage = "No mask image data provided for inpainting";
  468. return result;
  469. }
  470. generatedImages = modelWrapper->generateImageInpainting(
  471. params,
  472. request.initImageData,
  473. request.initImageWidth,
  474. request.initImageHeight,
  475. request.maskImageData,
  476. request.maskImageWidth,
  477. request.maskImageHeight,
  478. progressCallback,
  479. &generationStartTime
  480. );
  481. break;
  482. default:
  483. result.errorMessage = "Unknown request type";
  484. return result;
  485. }
  486. if (generatedImages.empty()) {
  487. result.errorMessage = "Failed to generate images: " + modelWrapper->getLastError();
  488. return result;
  489. }
  490. // Save generated images to files
  491. LOG_DEBUG("[TIMING_ANALYSIS] Job " + request.id + " - Starting post-processing (image saving) phase");
  492. auto postProcessingStart = std::chrono::system_clock::now();
  493. for (size_t i = 0; i < generatedImages.size(); i++) {
  494. const auto& image = generatedImages[i];
  495. LOG_DEBUG("[TIMING_ANALYSIS] Job " + request.id + " - Saving image " + std::to_string(i+1) + "/" + std::to_string(generatedImages.size()));
  496. auto imageSaveStart = std::chrono::system_clock::now();
  497. std::string imagePath = saveImageToFile(image, request.id, i);
  498. auto imageSaveEnd = std::chrono::system_clock::now();
  499. auto imageSaveTime = std::chrono::duration_cast<std::chrono::milliseconds>(imageSaveEnd - imageSaveStart).count();
  500. LOG_DEBUG("[TIMING_ANALYSIS] Job " + request.id + " - Image " + std::to_string(i+1) + " save took " + std::to_string(imageSaveTime) + "ms");
  501. if (!imagePath.empty()) {
  502. result.imagePaths.push_back(imagePath);
  503. } else {
  504. result.errorMessage = "Failed to save generated image " + std::to_string(i);
  505. return result;
  506. }
  507. }
  508. auto postProcessingEnd = std::chrono::system_clock::now();
  509. auto postProcessingTime = std::chrono::duration_cast<std::chrono::milliseconds>(postProcessingEnd - postProcessingStart).count();
  510. LOG_DEBUG("[TIMING_ANALYSIS] Job " + request.id + " - Post-processing completed in " + std::to_string(postProcessingTime) + "ms");
            result.success = true;
            // generatedImages is non-empty here (checked above); processRequest later
            // overwrites generationTime with the measured wall-clock duration.
            result.generationTime = generatedImages[0].generationTime;
            result.errorMessage = "";
  514. } catch (const std::exception& e) {
  515. result.errorMessage = "Exception during generation: " + std::string(e.what());
  516. }
  517. return result;
  518. }
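    // Persists one generated image as <outputDir>/<requestId>/<requestId>_<index>.png.
    // Validates the pixel buffer, probes the directory with a throwaway ".test" file,
    // writes the PNG with stb_image_write, and verifies the result is a non-empty file.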
  519. std::string saveImageToFile(const StableDiffusionWrapper::GeneratedImage& image, const std::string& requestId, size_t index) {
  520. // Create job-specific output directory
  521. std::string jobOutputDir = outputDir + "/" + requestId;
  522. std::error_code ec;
  523. std::filesystem::create_directories(jobOutputDir, ec);
  524. if (ec) {
  525. LOG_ERROR("Failed to create output directory " + jobOutputDir + ": " + ec.message());
  526. return "";
  527. }
  528. // Generate filename
  529. std::stringstream ss;
  530. ss << jobOutputDir << "/" << requestId << "_" << index << ".png";
  531. std::string filename = ss.str();
  532. LOG_DEBUG("Attempting to save image to: " + filename);
  533. // Check if image data is valid
  534. if (image.data.empty() || image.width <= 0 || image.height <= 0) {
  535. LOG_ERROR("Invalid image data for " + requestId + "_" + std::to_string(index) +
  536. ": width=" + std::to_string(image.width) +
  537. ", height=" + std::to_string(image.height) +
  538. ", channels=" + std::to_string(image.channels) +
  539. ", data_size=" + std::to_string(image.data.size()));
  540. return "";
  541. }
  542. // Validate image data integrity
  543. const size_t expectedDataSize = static_cast<size_t>(image.width) * image.height * image.channels;
  544. if (image.data.size() != expectedDataSize) {
  545. LOG_WARNING("Image data size mismatch for " + requestId + "_" + std::to_string(index) +
  546. ": expected=" + std::to_string(expectedDataSize) +
  547. ", actual=" + std::to_string(image.data.size()));
  548. // Continue anyway, but log the warning
  549. }
  550. // Check if we can write to the directory
  551. std::ofstream testFile(filename + ".test");
  552. if (!testFile.is_open()) {
  553. LOG_ERROR("Cannot write to directory " + jobOutputDir +
  554. ": permission denied or disk full");
  555. return "";
  556. }
  557. testFile.close();
  558. std::filesystem::remove(filename + ".test");
  559. // Write PNG file using stb_image_write with detailed error logging
  560. LOG_DEBUG("Writing PNG file: " + filename +
  561. " (size: " + std::to_string(image.width) + "x" + std::to_string(image.height) +
  562. "x" + std::to_string(image.channels) + ")");
  563. int result = stbi_write_png(
  564. filename.c_str(),
  565. image.width,
  566. image.height,
  567. image.channels,
  568. image.data.data(),
  569. image.width * image.channels // stride in bytes
  570. );
  571. if (result == 0) {
  572. LOG_ERROR("stbi_write_png failed for " + filename);
  573. // Try to get more detailed error information
  574. LOG_ERROR("Image details:");
  575. LOG_ERROR(" Dimensions: " + std::to_string(image.width) + "x" + std::to_string(image.height));
  576. LOG_ERROR(" Channels: " + std::to_string(image.channels));
  577. LOG_ERROR(" Data size: " + std::to_string(image.data.size()) + " bytes");
  578. LOG_ERROR(" Expected size: " + std::to_string(expectedDataSize) + " bytes");
  579. LOG_ERROR(" Stride: " + std::to_string(image.width * image.channels) + " bytes");
  580. // Check if file was created but is empty
  581. if (std::filesystem::exists(filename)) {
  582. auto fileSize = std::filesystem::file_size(filename);
  583. LOG_ERROR(" File exists but size is: " + std::to_string(fileSize) + " bytes");
  584. if (fileSize == 0) {
  585. LOG_ERROR(" ERROR: Zero-byte file created - stbi_write_png returned false but file exists");
  586. }
  587. } else {
  588. LOG_ERROR(" File was not created");
  589. }
  590. // Check disk space
  591. try {
  592. auto space = std::filesystem::space(jobOutputDir);
  593. LOG_ERROR(" Available disk space: " + std::to_string(space.available / (1024 * 1024)) + " MB");
  594. } catch (const std::exception& e) {
  595. LOG_ERROR(" Could not check disk space: " + std::string(e.what()));
  596. }
  597. return "";
  598. }
  599. // Verify the file was created successfully and has content
  600. if (!std::filesystem::exists(filename)) {
  601. LOG_ERROR("ERROR: stbi_write_png returned success but file does not exist: " + filename);
  602. return "";
  603. }
  604. auto fileSize = std::filesystem::file_size(filename);
  605. if (fileSize == 0) {
  606. LOG_ERROR("ERROR: stbi_write_png returned success but created zero-byte file: " + filename);
  607. return "";
  608. }
  609. LOG_DEBUG("Successfully saved generated image to: " + filename +
  610. " (" + std::to_string(image.width) + "x" + std::to_string(image.height) +
  611. ", " + std::to_string(image.channels) + " channels, "
  612. + std::to_string(image.data.size()) + " data bytes, "
  613. + std::to_string(fileSize) + " file bytes)");
  614. return filename;
  615. }
  616. std::string samplingMethodToString(SamplingMethod method) {
  617. switch (method) {
  618. case SamplingMethod::EULER: return "euler";
  619. case SamplingMethod::EULER_A: return "euler_a";
  620. case SamplingMethod::HEUN: return "heun";
  621. case SamplingMethod::DPM2: return "dpm2";
  622. case SamplingMethod::DPMPP2S_A: return "dpmpp2s_a";
  623. case SamplingMethod::DPMPP2M: return "dpmpp2m";
  624. case SamplingMethod::DPMPP2MV2: return "dpmpp2mv2";
  625. case SamplingMethod::IPNDM: return "ipndm";
  626. case SamplingMethod::IPNDM_V: return "ipndm_v";
  627. case SamplingMethod::LCM: return "lcm";
  628. case SamplingMethod::DDIM_TRAILING: return "ddim_trailing";
  629. case SamplingMethod::TCD: return "tcd";
  630. default: return "euler";
  631. }
  632. }
  633. std::string schedulerToString(Scheduler scheduler) {
  634. switch (scheduler) {
  635. case Scheduler::DISCRETE: return "discrete";
  636. case Scheduler::KARRAS: return "karras";
  637. case Scheduler::EXPONENTIAL: return "exponential";
  638. case Scheduler::AYS: return "ays";
  639. case Scheduler::GITS: return "gits";
  640. case Scheduler::SMOOTHSTEP: return "smoothstep";
  641. case Scheduler::SGM_UNIFORM: return "sgm_uniform";
  642. case Scheduler::SIMPLE: return "simple";
  643. default: return "default";
  644. }
  645. }
  646. std::string jobStatusToString(GenerationStatus status) {
  647. switch (status) {
  648. case GenerationStatus::QUEUED: return "queued";
  649. case GenerationStatus::MODEL_LOADING: return "loading";
  650. case GenerationStatus::PROCESSING: return "processing";
  651. case GenerationStatus::COMPLETED: return "completed";
  652. case GenerationStatus::FAILED: return "failed";
  653. default: return "unknown";
  654. }
  655. }
  656. GenerationStatus stringToJobStatus(const std::string& status) {
  657. if (status == "queued") return GenerationStatus::QUEUED;
  658. if (status == "loading") return GenerationStatus::MODEL_LOADING;
  659. if (status == "processing") return GenerationStatus::PROCESSING;
  660. if (status == "completed") return GenerationStatus::COMPLETED;
  661. if (status == "failed") return GenerationStatus::FAILED;
  662. return GenerationStatus::QUEUED;
  663. }
  664. std::string jobTypeToString(JobType type) {
  665. switch (type) {
  666. case JobType::GENERATION: return "generation";
  667. case JobType::HASHING: return "hashing";
  668. default: return "unknown";
  669. }
  670. }
  671. JobType stringToJobType(const std::string& type) {
  672. if (type == "generation") return JobType::GENERATION;
  673. if (type == "hashing") return JobType::HASHING;
  674. return JobType::GENERATION;
  675. }
  676. SamplingMethod stringToSamplingMethod(const std::string& method) {
  677. if (method == "euler") return SamplingMethod::EULER;
  678. if (method == "euler_a") return SamplingMethod::EULER_A;
  679. if (method == "heun") return SamplingMethod::HEUN;
  680. if (method == "dpm2") return SamplingMethod::DPM2;
  681. if (method == "dpmpp2s_a") return SamplingMethod::DPMPP2S_A;
  682. if (method == "dpmpp2m") return SamplingMethod::DPMPP2M;
  683. if (method == "dpmpp2mv2") return SamplingMethod::DPMPP2MV2;
  684. if (method == "ipndm") return SamplingMethod::IPNDM;
  685. if (method == "ipndm_v") return SamplingMethod::IPNDM_V;
  686. if (method == "lcm") return SamplingMethod::LCM;
  687. if (method == "ddim_trailing") return SamplingMethod::DDIM_TRAILING;
  688. if (method == "tcd") return SamplingMethod::TCD;
  689. return SamplingMethod::DEFAULT;
  690. }
  691. Scheduler stringToScheduler(const std::string& scheduler) {
  692. if (scheduler == "discrete") return Scheduler::DISCRETE;
  693. if (scheduler == "karras") return Scheduler::KARRAS;
  694. if (scheduler == "exponential") return Scheduler::EXPONENTIAL;
  695. if (scheduler == "ays") return Scheduler::AYS;
  696. if (scheduler == "gits") return Scheduler::GITS;
  697. if (scheduler == "smoothstep") return Scheduler::SMOOTHSTEP;
  698. if (scheduler == "sgm_uniform") return Scheduler::SGM_UNIFORM;
  699. if (scheduler == "simple") return Scheduler::SIMPLE;
  700. return Scheduler::DEFAULT;
  701. }
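    // Serializes a JobInfo to <queueDir>/<job.id>.json (parameters, progress, timings and
    // output paths) so jobs survive a restart and can be rebuilt by loadJobsFromDisk().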
  702. void saveJobToFile(const JobInfo& job) {
  703. try {
  704. // Create queue directory if it doesn't exist
  705. std::filesystem::create_directories(queueDir);
  706. // Create JSON object with enhanced fields
  707. nlohmann::json jobJson;
  708. // Basic fields
  709. jobJson["id"] = job.id;
  710. jobJson["type"] = jobTypeToString(job.type);
  711. jobJson["status"] = jobStatusToString(job.status);
  712. jobJson["prompt"] = job.prompt;
  713. jobJson["position"] = job.position;
  714. // Convert time points to milliseconds since epoch
  715. auto queuedMs = std::chrono::duration_cast<std::chrono::milliseconds>(
  716. job.queuedTime.time_since_epoch()).count();
  717. jobJson["queued_time"] = queuedMs;
  718. if (job.status != GenerationStatus::QUEUED) {
  719. auto startMs = std::chrono::duration_cast<std::chrono::milliseconds>(
  720. job.startTime.time_since_epoch()).count();
  721. jobJson["start_time"] = startMs;
  722. }
  723. if (job.status == GenerationStatus::COMPLETED || job.status == GenerationStatus::FAILED) {
  724. auto endMs = std::chrono::duration_cast<std::chrono::milliseconds>(
  725. job.endTime.time_since_epoch()).count();
  726. jobJson["end_time"] = endMs;
  727. }
  728. // Enhanced fields for repeatable generation
  729. if (!job.modelName.empty()) {
  730. jobJson["model_name"] = job.modelName;
  731. jobJson["model_hash"] = job.modelHash;
  732. jobJson["model_path"] = job.modelPath;
  733. }
  734. if (!job.negativePrompt.empty()) {
  735. jobJson["negative_prompt"] = job.negativePrompt;
  736. }
  737. jobJson["width"] = job.width;
  738. jobJson["height"] = job.height;
  739. jobJson["batch_count"] = job.batchCount;
  740. jobJson["steps"] = job.steps;
  741. jobJson["cfg_scale"] = job.cfgScale;
  742. jobJson["sampling_method"] = samplingMethodToString(job.samplingMethod);
  743. jobJson["scheduler"] = schedulerToString(job.scheduler);
  744. jobJson["seed"] = job.seed;
  745. jobJson["actual_seed"] = job.actualSeed;
  746. jobJson["request_type"] = job.requestType;
  747. jobJson["strength"] = job.strength;
  748. jobJson["control_strength"] = job.controlStrength;
  749. jobJson["clip_skip"] = job.clipSkip;
  750. jobJson["n_threads"] = job.nThreads;
  751. jobJson["offload_params_to_cpu"] = job.offloadParamsToCpu;
  752. jobJson["clip_on_cpu"] = job.clipOnCpu;
  753. jobJson["vae_on_cpu"] = job.vaeOnCpu;
  754. jobJson["diffusion_flash_attn"] = job.diffusionFlashAttn;
  755. jobJson["diffusion_conv_direct"] = job.diffusionConvDirect;
  756. jobJson["vae_conv_direct"] = job.vaeConvDirect;
  757. jobJson["generation_time"] = job.generationTime;
  758. // Image paths for complex operations
  759. if (!job.initImageData.empty()) {
  760. jobJson["init_image_path"] = job.initImageData;
  761. }
  762. if (!job.controlImageData.empty()) {
  763. jobJson["control_image_path"] = job.controlImageData;
  764. }
  765. if (!job.maskImageData.empty()) {
  766. jobJson["mask_image_path"] = job.maskImageData;
  767. }
  768. // Model paths for advanced usage
  769. if (!job.clipLPath.empty()) {
  770. jobJson["clip_l_path"] = job.clipLPath;
  771. }
  772. if (!job.clipGPath.empty()) {
  773. jobJson["clip_g_path"] = job.clipGPath;
  774. }
  775. if (!job.vaePath.empty()) {
  776. jobJson["vae_path"] = job.vaePath;
  777. }
  778. if (!job.taesdPath.empty()) {
  779. jobJson["taesd_path"] = job.taesdPath;
  780. }
  781. if (!job.controlNetPath.empty()) {
  782. jobJson["controlnet_path"] = job.controlNetPath;
  783. }
  784. if (!job.embeddingDir.empty()) {
  785. jobJson["embedding_dir"] = job.embeddingDir;
  786. }
  787. if (!job.loraModelDir.empty()) {
  788. jobJson["lora_model_dir"] = job.loraModelDir;
  789. }
  790. if (!job.esrganPath.empty()) {
  791. jobJson["esrgan_path"] = job.esrganPath;
  792. }
  793. jobJson["upscale_factor"] = job.upscaleFactor;
  794. // Progress and timing information
  795. jobJson["progress"] = job.progress;
  796. jobJson["model_load_progress"] = job.modelLoadProgress;
  797. jobJson["generation_progress"] = job.generationProgress;
  798. jobJson["current_step"] = job.currentStep;
  799. jobJson["total_steps"] = job.totalSteps;
  800. jobJson["time_elapsed"] = job.timeElapsed;
  801. jobJson["time_remaining"] = job.timeRemaining;
  802. jobJson["speed"] = job.speed;
  803. jobJson["first_generation_callback"] = job.firstGenerationCallback;
  804. // Output information
  805. jobJson["output_files"] = job.outputFiles;
  806. jobJson["error_message"] = job.errorMessage;
  807. // Write to file
  808. std::string filename = queueDir + "/" + job.id + ".json";
  809. std::ofstream file(filename);
  810. if (file.is_open()) {
  811. file << jobJson.dump(2);
  812. file.close();
  813. }
  814. } catch (const std::exception& e) {
  815. LOG_ERROR("Error saving job to file: " + std::string(e.what()));
  816. }
  817. }
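    // Restores persisted jobs at startup. Jobs found still in the PROCESSING state are
    // marked FAILED ("Server restarted while job was processing") since their worker is gone.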
  818. void loadJobsFromDisk() {
  819. try {
  820. if (!std::filesystem::exists(queueDir)) {
  821. return;
  822. }
  823. LOG_INFO("Loading persisted jobs from: " + queueDir);
  824. int loadedCount = 0;
  825. for (const auto& entry : std::filesystem::directory_iterator(queueDir)) {
  826. if (entry.path().extension() != ".json") {
  827. continue;
  828. }
  829. try {
  830. std::ifstream file(entry.path());
  831. if (!file.is_open()) {
  832. continue;
  833. }
  834. nlohmann::json jobJson = nlohmann::json::parse(file);
  835. file.close();
  836. // Reconstruct JobInfo
  837. JobInfo job;
  838. job.id = jobJson["id"];
  839. job.type = stringToJobType(jobJson["type"]);
  840. job.status = stringToJobStatus(jobJson["status"]);
  841. job.prompt = jobJson["prompt"];
  842. job.position = jobJson["position"];
  843. // Reconstruct time points
  844. auto queuedMs = jobJson["queued_time"].get<int64_t>();
  845. job.queuedTime = std::chrono::system_clock::time_point(
  846. std::chrono::milliseconds(queuedMs));
  847. if (jobJson.contains("start_time")) {
  848. auto startMs = jobJson["start_time"].get<int64_t>();
  849. job.startTime = std::chrono::system_clock::time_point(
  850. std::chrono::milliseconds(startMs));
  851. }
  852. if (jobJson.contains("end_time")) {
  853. auto endMs = jobJson["end_time"].get<int64_t>();
  854. job.endTime = std::chrono::system_clock::time_point(
  855. std::chrono::milliseconds(endMs));
  856. }
  857. if (jobJson.contains("output_files")) {
  858. job.outputFiles = jobJson["output_files"].get<std::vector<std::string>>();
  859. }
  860. if (jobJson.contains("error_message")) {
  861. job.errorMessage = jobJson["error_message"];
  862. }
  863. // Load enhanced fields (with backward compatibility)
  864. if (jobJson.contains("model_name")) {
  865. job.modelName = jobJson["model_name"];
  866. job.modelHash = jobJson.value("model_hash", "");
  867. job.modelPath = jobJson.value("model_path", "");
  868. }
  869. if (jobJson.contains("negative_prompt")) {
  870. job.negativePrompt = jobJson["negative_prompt"];
  871. }
  872. if (jobJson.contains("width")) job.width = jobJson["width"];
  873. if (jobJson.contains("height")) job.height = jobJson["height"];
  874. if (jobJson.contains("batch_count")) job.batchCount = jobJson["batch_count"];
  875. if (jobJson.contains("steps")) job.steps = jobJson["steps"];
  876. if (jobJson.contains("cfg_scale")) job.cfgScale = jobJson["cfg_scale"];
  877. if (jobJson.contains("sampling_method")) {
  878. std::string samplingMethodStr = jobJson["sampling_method"];
  879. job.samplingMethod = stringToSamplingMethod(samplingMethodStr);
  880. }
  881. if (jobJson.contains("scheduler")) {
  882. std::string schedulerStr = jobJson["scheduler"];
  883. job.scheduler = stringToScheduler(schedulerStr);
  884. }
  885. if (jobJson.contains("seed")) job.seed = jobJson["seed"];
  886. if (jobJson.contains("actual_seed")) job.actualSeed = jobJson["actual_seed"];
  887. if (jobJson.contains("request_type")) job.requestType = jobJson["request_type"];
  888. if (jobJson.contains("strength")) job.strength = jobJson["strength"];
  889. if (jobJson.contains("control_strength")) job.controlStrength = jobJson["control_strength"];
  890. if (jobJson.contains("clip_skip")) job.clipSkip = jobJson["clip_skip"];
  891. if (jobJson.contains("n_threads")) job.nThreads = jobJson["n_threads"];
  892. if (jobJson.contains("offload_params_to_cpu")) job.offloadParamsToCpu = jobJson["offload_params_to_cpu"];
  893. if (jobJson.contains("clip_on_cpu")) job.clipOnCpu = jobJson["clip_on_cpu"];
  894. if (jobJson.contains("vae_on_cpu")) job.vaeOnCpu = jobJson["vae_on_cpu"];
  895. if (jobJson.contains("diffusion_flash_attn")) job.diffusionFlashAttn = jobJson["diffusion_flash_attn"];
  896. if (jobJson.contains("diffusion_conv_direct")) job.diffusionConvDirect = jobJson["diffusion_conv_direct"];
  897. if (jobJson.contains("vae_conv_direct")) job.vaeConvDirect = jobJson["vae_conv_direct"];
  898. if (jobJson.contains("generation_time")) job.generationTime = jobJson["generation_time"];
  899. // Image paths for complex operations
  900. if (jobJson.contains("init_image_path")) job.initImageData = jobJson["init_image_path"];
  901. if (jobJson.contains("control_image_path")) job.controlImageData = jobJson["control_image_path"];
  902. if (jobJson.contains("mask_image_path")) job.maskImageData = jobJson["mask_image_path"];
  903. // Model paths for advanced usage
  904. if (jobJson.contains("clip_l_path")) job.clipLPath = jobJson["clip_l_path"];
  905. if (jobJson.contains("clip_g_path")) job.clipGPath = jobJson["clip_g_path"];
  906. if (jobJson.contains("vae_path")) job.vaePath = jobJson["vae_path"];
  907. if (jobJson.contains("taesd_path")) job.taesdPath = jobJson["taesd_path"];
  908. if (jobJson.contains("controlnet_path")) job.controlNetPath = jobJson["controlnet_path"];
  909. if (jobJson.contains("embedding_dir")) job.embeddingDir = jobJson["embedding_dir"];
  910. if (jobJson.contains("lora_model_dir")) job.loraModelDir = jobJson["lora_model_dir"];
  911. if (jobJson.contains("esrgan_path")) job.esrganPath = jobJson["esrgan_path"];
  912. if (jobJson.contains("upscale_factor")) job.upscaleFactor = jobJson["upscale_factor"];
  913. // Progress and timing information
  914. if (jobJson.contains("progress")) job.progress = jobJson["progress"];
  915. if (jobJson.contains("model_load_progress")) job.modelLoadProgress = jobJson["model_load_progress"];
  916. if (jobJson.contains("generation_progress")) job.generationProgress = jobJson["generation_progress"];
  917. if (jobJson.contains("current_step")) job.currentStep = jobJson["current_step"];
  918. if (jobJson.contains("total_steps")) job.totalSteps = jobJson["total_steps"];
  919. if (jobJson.contains("time_elapsed")) job.timeElapsed = jobJson["time_elapsed"];
  920. if (jobJson.contains("time_remaining")) job.timeRemaining = jobJson["time_remaining"];
  921. if (jobJson.contains("speed")) job.speed = jobJson["speed"];
  922. if (jobJson.contains("first_generation_callback")) {
  923. job.firstGenerationCallback = jobJson["first_generation_callback"];
  924. } else {
  925. // For backward compatibility, default to true for loaded jobs
  926. job.firstGenerationCallback = true;
  927. }
  928. // Clean up stale processing jobs from server restart
  929. if (job.status == GenerationStatus::PROCESSING) {
  930. job.status = GenerationStatus::FAILED;
  931. job.errorMessage = "Server restarted while job was processing";
  932. job.endTime = std::chrono::system_clock::now();
  933. LOG_DEBUG("Marked stale job as failed: " + job.id);
  934. // Persist updated status to disk
  935. saveJobToFile(job);
  936. }
  937. // Add to active jobs
  938. std::lock_guard<std::mutex> lock(jobsMutex);
  939. activeJobs[job.id] = job;
  940. loadedCount++;
  941. } catch (const std::exception& e) {
  942. LOG_ERROR("Error loading job from " + entry.path().string() + ": " + std::string(e.what()));
  943. }
  944. }
  945. if (loadedCount > 0) {
  946. LOG_INFO("Loaded " + std::to_string(loadedCount) + " persisted job(s)");
  947. }
  948. } catch (const std::exception& e) {
  949. LOG_ERROR("Error loading jobs from disk: " + std::string(e.what()));
  950. }
  951. }
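    // Computes hashes for the requested models (or, when no names are given, for every
    // model that is missing a hash) by delegating to ModelManager::ensureModelHash().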
  952. HashResult performHashJob(const HashRequest& request) {
  953. HashResult result;
  954. result.requestId = request.id;
  955. result.success = false;
  956. result.modelsHashed = 0;
  957. auto startTime = std::chrono::system_clock::now();
  958. if (!modelManager) {
  959. result.errorMessage = "Model manager not available";
  960. result.status = GenerationStatus::FAILED;
  961. return result;
  962. }
  963. // Get list of models to hash
  964. std::vector<std::string> modelsToHash;
  965. if (request.modelNames.empty()) {
  966. // Hash all models without hashes
  967. auto allModels = modelManager->getAllModels();
  968. for (const auto& [name, info] : allModels) {
  969. if (info.sha256.empty() || request.forceRehash) {
  970. modelsToHash.push_back(name);
  971. }
  972. }
  973. } else {
  974. modelsToHash = request.modelNames;
  975. }
  976. LOG_DEBUG("Hashing " + std::to_string(modelsToHash.size()) + " model(s)...");
  977. // Hash each model
  978. for (const auto& modelName : modelsToHash) {
  979. std::string hash = modelManager->ensureModelHash(modelName, request.forceRehash);
  980. if (!hash.empty()) {
  981. result.modelHashes[modelName] = hash;
  982. result.modelsHashed++;
  983. } else {
  984. LOG_ERROR("Failed to hash model: " + modelName);
  985. }
  986. }
  987. auto endTime = std::chrono::system_clock::now();
  988. result.hashingTime = std::chrono::duration_cast<std::chrono::milliseconds>(
  989. endTime - startTime).count();
  990. result.success = result.modelsHashed > 0;
  991. result.status = result.success ? GenerationStatus::COMPLETED : GenerationStatus::FAILED;
  992. if (!result.success) {
  993. result.errorMessage = "Failed to hash any models";
  994. }
  995. return result;
  996. }
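    // Converts a model by shelling out to the bundled sd CLI ("--mode convert") via popen,
    // capturing its combined output. The relative binary path assumes the server runs from
    // the repository layout produced by the CMake build.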
  997. ConversionResult performConversionJob(const ConversionRequest& request) {
  998. ConversionResult result;
  999. result.requestId = request.id;
  1000. result.success = false;
  1001. auto startTime = std::chrono::system_clock::now();
  1002. // Conversion start output removed from stdout
  1003. // Check if input file exists
  1004. namespace fs = std::filesystem;
  1005. if (!fs::exists(request.modelPath)) {
  1006. result.errorMessage = "Input model file not found: " + request.modelPath;
  1007. result.status = GenerationStatus::FAILED;
  1008. return result;
  1009. }
  1010. // Get original file size
  1011. try {
  1012. auto originalSize = fs::file_size(request.modelPath);
  1013. result.originalSize = formatFileSize(originalSize);
  1014. } catch (const std::exception& e) {
  1015. result.originalSize = "Unknown";
  1016. }
        // Build the conversion command for the sd CLI.
        // The binary path is hard-coded relative to the server's working directory
        // (the CMake install tree produced by the build).
        std::string sdBinaryPath = "../build/stable-diffusion.cpp-install/bin/sd";
        std::stringstream cmd;
        cmd << sdBinaryPath << " --mode convert";
        cmd << " -m \"" << request.modelPath << "\"";
        cmd << " -o \"" << request.outputPath << "\"";
        cmd << " --type " << request.quantizationType;
        cmd << " 2>&1"; // Capture stderr alongside stdout
  1026. // Command execution output removed from stdout
  1027. // Execute conversion
  1028. FILE* pipe = popen(cmd.str().c_str(), "r");
  1029. if (!pipe) {
  1030. result.errorMessage = "Failed to execute conversion command";
  1031. result.status = GenerationStatus::FAILED;
  1032. return result;
  1033. }
  1034. // Read command output
  1035. char buffer[256];
  1036. std::string output;
  1037. while (fgets(buffer, sizeof(buffer), pipe) != nullptr) {
  1038. output += buffer;
  1039. // Progress output removed from stdout
  1040. }
        // pclose() returns the child's wait status, not the raw exit code.
        int status = pclose(pipe);
        int exitCode = (status != -1 && WIFEXITED(status)) ? WEXITSTATUS(status) : -1;
  1042. auto endTime = std::chrono::system_clock::now();
  1043. result.conversionTime = std::chrono::duration_cast<std::chrono::milliseconds>(
  1044. endTime - startTime).count();
  1045. if (exitCode != 0) {
  1046. result.errorMessage = "Conversion failed with exit code " + std::to_string(exitCode);
  1047. if (!output.empty()) {
  1048. result.errorMessage += "\nOutput: " + output;
  1049. }
  1050. result.status = GenerationStatus::FAILED;
  1051. return result;
  1052. }
  1053. // Check if output file was created
  1054. if (!fs::exists(request.outputPath)) {
  1055. result.errorMessage = "Output file was not created: " + request.outputPath;
  1056. result.status = GenerationStatus::FAILED;
  1057. return result;
  1058. }
  1059. // Get converted file size
  1060. try {
  1061. auto convertedSize = fs::file_size(request.outputPath);
  1062. result.convertedSize = formatFileSize(convertedSize);
  1063. } catch (const std::exception& e) {
  1064. result.convertedSize = "Unknown";
  1065. }
  1066. result.success = true;
  1067. result.status = GenerationStatus::COMPLETED;
  1068. result.outputPath = request.outputPath;
  1069. LOG_DEBUG("Conversion completed successfully!");
  1070. LOG_DEBUG(" Original size: " + result.originalSize);
  1071. LOG_DEBUG(" Converted size: " + result.convertedSize);
  1072. LOG_DEBUG(" Time: " + std::to_string(result.conversionTime) + "ms");
  1073. // Trigger model rescan after successful conversion
  1074. if (modelManager) {
  1075. LOG_DEBUG("Triggering model rescan...");
  1076. modelManager->scanModelsDirectory();
  1077. }
  1078. return result;
  1079. }
  1080. std::string formatFileSize(size_t bytes) {
  1081. const char* units[] = {"B", "KB", "MB", "GB", "TB"};
  1082. int unitIndex = 0;
  1083. double size = static_cast<double>(bytes);
  1084. while (size >= 1024.0 && unitIndex < 4) {
  1085. size /= 1024.0;
  1086. unitIndex++;
  1087. }
  1088. std::stringstream ss;
  1089. ss << std::fixed << std::setprecision(2) << size << " " << units[unitIndex];
  1090. return ss.str();
  1091. }

    // Helper function to populate JobInfo from GenerationRequest
    void populateJobInfoFromRequest(JobInfo& jobInfo, const GenerationRequest& request) {
        // Model information
        jobInfo.modelName = request.modelName;
        jobInfo.modelHash = modelManager ? modelManager->getModelInfo(request.modelName).sha256 : "";
        jobInfo.modelPath = modelManager ? modelManager->getModelInfo(request.modelName).path : "";

        // Generation parameters
        jobInfo.negativePrompt = request.negativePrompt;
        jobInfo.width = request.width;
        jobInfo.height = request.height;
        jobInfo.batchCount = request.batchCount;
        jobInfo.steps = request.steps;
        jobInfo.cfgScale = request.cfgScale;
        jobInfo.samplingMethod = request.samplingMethod;
        jobInfo.scheduler = request.scheduler;
        jobInfo.seed = request.seed;
        jobInfo.strength = request.strength;
        jobInfo.controlStrength = request.controlStrength;
        jobInfo.clipSkip = request.clipSkip;
        jobInfo.nThreads = request.nThreads;
        jobInfo.offloadParamsToCpu = request.offloadParamsToCpu;
        jobInfo.clipOnCpu = request.clipOnCpu;
        jobInfo.vaeOnCpu = request.vaeOnCpu;
        jobInfo.diffusionFlashAttn = request.diffusionFlashAttn;
        jobInfo.diffusionConvDirect = request.diffusionConvDirect;
        jobInfo.vaeConvDirect = request.vaeConvDirect;

        // Request type - store image paths instead of image data
        switch (request.requestType) {
        case GenerationRequest::RequestType::TEXT2IMG:
            jobInfo.requestType = "text2img";
            break;
        case GenerationRequest::RequestType::IMG2IMG:
            jobInfo.requestType = "img2img";
            // Store path to init image instead of the image data
            jobInfo.initImageData = request.initImagePath; // Store path, not base64
            break;
        case GenerationRequest::RequestType::CONTROLNET:
            jobInfo.requestType = "controlnet";
            // Store path to control image instead of the image data
            jobInfo.controlImageData = request.controlImagePath; // Store path, not base64
            break;
        case GenerationRequest::RequestType::UPSCALER:
            jobInfo.requestType = "upscaler";
            // Store path to input image instead of the image data
            jobInfo.initImageData = request.initImagePath; // Store path, not base64
            break;
        case GenerationRequest::RequestType::INPAINTING:
            jobInfo.requestType = "inpainting";
            // Store paths to images instead of the image data
            jobInfo.initImageData = request.initImagePath; // Store path, not base64
            jobInfo.maskImageData = request.maskImagePath; // Store path, not base64
            break;
        }

        // Model paths
        jobInfo.clipLPath = request.clipLPath;
        jobInfo.clipGPath = request.clipGPath;
        jobInfo.vaePath = request.vaePath;
        jobInfo.taesdPath = request.taesdPath;
        jobInfo.controlNetPath = request.controlNetPath;
        jobInfo.embeddingDir = request.embeddingDir;
        jobInfo.loraModelDir = request.loraModelDir;
        jobInfo.esrganPath = request.esrganPath;
        jobInfo.upscaleFactor = request.upscaleFactor;
    }
};
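
// Public GenerationQueue methods below simply delegate to the pImpl instance.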

GenerationQueue::GenerationQueue(ModelManager* modelManager, int maxConcurrentGenerations,
                                 const std::string& queueDir, const std::string& outputDir)
    : pImpl(std::make_unique<Impl>()) {
    pImpl->modelManager = modelManager;
    pImpl->maxConcurrentGenerations = maxConcurrentGenerations;
    pImpl->queueDir = queueDir;
    pImpl->outputDir = outputDir;
    LOG_INFO("GenerationQueue initialized");
    LOG_INFO(" Max concurrent generations: " + std::to_string(maxConcurrentGenerations));
    LOG_INFO(" Queue directory: " + queueDir);
    LOG_INFO(" Output directory: " + outputDir);
    // Load any existing jobs from disk
    pImpl->loadJobsFromDisk();
}

GenerationQueue::~GenerationQueue() {
    stop();
}
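
// Queue a generation request. A JobInfo record is created and persisted to disk
// immediately; the returned future becomes ready once the worker thread has
// finished (or failed) the generation.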
std::future<GenerationResult> GenerationQueue::enqueueRequest(const GenerationRequest& request) {
    LOG_DEBUG("Enqueuing generation request: " + request.id);
    LOG_DEBUG(" Prompt: " + request.prompt.substr(0, 100) +
              (request.prompt.length() > 100 ? "..." : ""));
    LOG_DEBUG(" Model: " + request.modelName);
    LOG_DEBUG(" Size: " + std::to_string(request.width) + "x" + std::to_string(request.height));
    LOG_DEBUG(" Steps: " + std::to_string(request.steps) + ", CFG: " + std::to_string(request.cfgScale));

    // Create promise and future
    auto promise = std::make_shared<std::promise<GenerationResult>>();
    auto future = promise->get_future();

    // Store the promise; the future obtained above keeps its own reference to the
    // shared state, so moving the promise into the map is safe.
    {
        std::lock_guard<std::mutex> lock(pImpl->jobsMutex);
        pImpl->jobPromises[request.id] = std::move(*promise);
    }

    // Add to queue
    {
        std::lock_guard<std::mutex> lock(pImpl->queueMutex);
        // Create job info with enhanced data
        JobInfo jobInfo;
        jobInfo.id = request.id;
        jobInfo.type = JobType::GENERATION;
        jobInfo.status = GenerationStatus::QUEUED;
        jobInfo.prompt = request.prompt; // Store full prompt
        jobInfo.queuedTime = std::chrono::system_clock::now();
        jobInfo.position = pImpl->requestQueue.size() + 1;
        // Populate all enhanced fields from the request
        pImpl->populateJobInfoFromRequest(jobInfo, request);

        // Store job info
        {
            std::lock_guard<std::mutex> jobsLock(pImpl->jobsMutex);
            pImpl->activeJobs[request.id] = jobInfo;
        }

        // Persist to disk
        pImpl->saveJobToFile(jobInfo);
        pImpl->requestQueue.push(request);
        pImpl->queueSize.store(pImpl->requestQueue.size());
    }

    // Notify worker thread
    pImpl->queueCondition.notify_one();
    return future;
}
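
// Queue a model-hashing job. Hash jobs share the generation queue: a placeholder
// GenerationRequest with the prompt "HASH_JOB" is pushed, and the real HashRequest
// is stored by id so the worker can retrieve it when the placeholder is dequeued.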
std::future<HashResult> GenerationQueue::enqueueHashRequest(const HashRequest& request) {
    auto promise = std::make_shared<std::promise<HashResult>>();
    auto future = promise->get_future();
    std::unique_lock<std::mutex> lock(pImpl->queueMutex);

    // Create a generation request that acts as a placeholder for hash job
    GenerationRequest hashJobPlaceholder;
    hashJobPlaceholder.id = request.id;
    hashJobPlaceholder.prompt = "HASH_JOB"; // Special marker
    hashJobPlaceholder.modelName = request.modelNames.empty() ? "ALL_MODELS" : request.modelNames[0];

    // Store promise for retrieval later
    pImpl->hashPromises[request.id] = promise;
    pImpl->hashRequests[request.id] = request;
    pImpl->requestQueue.push(hashJobPlaceholder);
    pImpl->queueCondition.notify_one();

    LOG_DEBUG("Enqueued hash request: " + request.id);
    return future;
}
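
// Queue a model conversion/quantization job. Like hash jobs, conversions ride the
// generation queue via a placeholder request marked with the prompt "CONVERSION_JOB".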
std::future<ConversionResult> GenerationQueue::enqueueConversionRequest(const ConversionRequest& request) {
    auto promise = std::make_shared<std::promise<ConversionResult>>();
    auto future = promise->get_future();
    std::unique_lock<std::mutex> lock(pImpl->queueMutex);

    // Create a generation request that acts as a placeholder for conversion job
    GenerationRequest conversionJobPlaceholder;
    conversionJobPlaceholder.id = request.id;
    conversionJobPlaceholder.prompt = "CONVERSION_JOB"; // Special marker
    conversionJobPlaceholder.modelName = request.modelName;

    // Store promise for retrieval later
    pImpl->conversionPromises[request.id] = promise;
    pImpl->conversionRequests[request.id] = request;
    pImpl->requestQueue.push(conversionJobPlaceholder);
    pImpl->queueCondition.notify_one();

    LOG_DEBUG("Enqueued conversion request: " + request.id + " (model: " + request.modelName + ", type: " + request.quantizationType + ")");
    return future;
}
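
// Queue depth and in-flight generation counts are kept in std::atomic counters,
// so these accessors do not need to take a lock.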
size_t GenerationQueue::getQueueSize() const {
    return pImpl->queueSize.load();
}

size_t GenerationQueue::getActiveGenerations() const {
    return pImpl->activeGenerations.load();
}
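
// Return a snapshot of all jobs currently tracked in activeJobs, sorted by status
// and then by the time they were queued.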
std::vector<JobInfo> GenerationQueue::getQueueStatus() const {
    std::vector<JobInfo> jobs;
    std::lock_guard<std::mutex> lock(pImpl->jobsMutex);
    jobs.reserve(pImpl->activeJobs.size());
    for (const auto& pair : pImpl->activeJobs) {
        jobs.push_back(pair.second);
    }
    // Sort by status first, then by queued time within the same status
    std::sort(jobs.begin(), jobs.end(), [](const JobInfo& a, const JobInfo& b) {
        if (a.status != b.status) {
            return static_cast<int>(a.status) < static_cast<int>(b.status);
        }
        return a.queuedTime < b.queuedTime;
    });
    return jobs;
}
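
// Look up a single job by id; returns a default-constructed JobInfo when the id is unknown.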
JobInfo GenerationQueue::getJobInfo(const std::string& jobId) const {
    std::lock_guard<std::mutex> lock(pImpl->jobsMutex);
    auto it = pImpl->activeJobs.find(jobId);
    if (it != pImpl->activeJobs.end()) {
        return it->second;
    }
    return JobInfo{}; // Return empty job info if not found
}
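
// Cancel a job that is still waiting in the queue. The queue is rebuilt without the
// matching request and its promise is completed with a cancellation error. A job that
// has already been handed to the worker is not interrupted by this call.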
bool GenerationQueue::cancelJob(const std::string& jobId) {
    std::lock_guard<std::mutex> queueLock(pImpl->queueMutex);
    std::lock_guard<std::mutex> jobsLock(pImpl->jobsMutex);

    // Check if job is still queued
    std::queue<GenerationRequest> newQueue;
    bool found = false;
    while (!pImpl->requestQueue.empty()) {
        GenerationRequest request = pImpl->requestQueue.front();
        pImpl->requestQueue.pop();
        if (request.id == jobId) {
            found = true;
            // Update job status
            auto it = pImpl->activeJobs.find(jobId);
            if (it != pImpl->activeJobs.end()) {
                it->second.status = GenerationStatus::FAILED;
                it->second.endTime = std::chrono::system_clock::now();
            }
            // Set promise with cancellation error
            auto promiseIt = pImpl->jobPromises.find(jobId);
            if (promiseIt != pImpl->jobPromises.end()) {
                GenerationResult result;
                result.requestId = jobId;
                result.success = false;
                result.errorMessage = "Job cancelled by user";
                result.generationTime = 0;
                promiseIt->second.set_value(result);
                pImpl->jobPromises.erase(promiseIt);
            }
        } else {
            newQueue.push(request);
        }
    }
    pImpl->requestQueue = newQueue;
    pImpl->queueSize.store(pImpl->requestQueue.size());
    return found;
}
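
// Drop every queued job. Each pending request is marked FAILED and its promise is
// completed with a "Queue cleared" error; jobs already in progress are left running.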
void GenerationQueue::clearQueue() {
    LOG_DEBUG("Clearing generation queue");
    std::lock_guard<std::mutex> queueLock(pImpl->queueMutex);
    std::lock_guard<std::mutex> jobsLock(pImpl->jobsMutex);

    // Cancel all queued jobs
    while (!pImpl->requestQueue.empty()) {
        GenerationRequest request = pImpl->requestQueue.front();
        pImpl->requestQueue.pop();
        // Update job status
        auto it = pImpl->activeJobs.find(request.id);
        if (it != pImpl->activeJobs.end()) {
            it->second.status = GenerationStatus::FAILED;
            it->second.endTime = std::chrono::system_clock::now();
        }
        // Set promise with cancellation error
        auto promiseIt = pImpl->jobPromises.find(request.id);
        if (promiseIt != pImpl->jobPromises.end()) {
            GenerationResult result;
            result.requestId = request.id;
            result.success = false;
            result.errorMessage = "Queue cleared";
            result.generationTime = 0;
            promiseIt->second.set_value(result);
            pImpl->jobPromises.erase(promiseIt);
        }
    }
    pImpl->queueSize.store(0);
}
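
// Start the worker thread. Safe to call repeatedly; calling start() while the queue
// is already running is a no-op.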
void GenerationQueue::start() {
    if (pImpl->running.load()) {
        LOG_DEBUG("GenerationQueue is already running");
        return;
    }
    pImpl->running.store(true);
    pImpl->stopRequested.store(false);
    pImpl->workerThread = std::thread(&Impl::workerThreadFunction, pImpl.get());
    LOG_DEBUG("GenerationQueue started");
}
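
// Stop the worker thread and fail any generation promises that are still outstanding,
// so callers blocked on a future are released rather than left waiting forever.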
void GenerationQueue::stop() {
    if (!pImpl->running.load()) {
        return;
    }
    LOG_DEBUG("Stopping GenerationQueue...");
    pImpl->stopRequested.store(true);
    pImpl->queueCondition.notify_all();
    if (pImpl->workerThread.joinable()) {
        pImpl->workerThread.join();
    }
    pImpl->running.store(false);

    // Clear any remaining promises
    std::lock_guard<std::mutex> lock(pImpl->jobsMutex);
    for (auto& pair : pImpl->jobPromises) {
        GenerationResult result;
        result.requestId = pair.first;
        result.success = false;
        result.errorMessage = "Queue stopped";
        result.generationTime = 0;
        pair.second.set_value(result);
    }
    pImpl->jobPromises.clear();
    LOG_DEBUG("GenerationQueue stopped");
}

bool GenerationQueue::isRunning() const {
    return pImpl->running.load();
}
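
// Adjust the concurrency limit used when deciding how many generations may run at once.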
void GenerationQueue::setMaxConcurrentGenerations(int maxConcurrent) {
    pImpl->maxConcurrentGenerations = maxConcurrent;
    LOG_DEBUG("GenerationQueue max concurrent generations set to: " + std::to_string(maxConcurrent));
}
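
// Illustrative usage sketch (not part of the implementation; the ModelManager setup,
// directory arguments, and request field values are assumptions for the example):
//
//     ModelManager models;                       // hypothetical, configured elsewhere
//     GenerationQueue queue(&models, /*maxConcurrentGenerations=*/1, "queue", "output");
//     queue.start();
//
//     GenerationRequest req;
//     req.id = "job-1";
//     req.prompt = "a lighthouse at dusk";
//     req.modelName = "sd15.safetensors";
//     std::future<GenerationResult> pending = queue.enqueueRequest(req);
//     GenerationResult result = pending.get();   // blocks until the worker finishes
//
//     queue.stop();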