1. Integration Example
RunningHub-API
  • Log of Update
  • Getting Started
    • Instructions for Use
    • About nodeInfoList
    • About Enterprise ComfyUI API
    • Native ComfyUI API Integration Guide
    • API Error Code Reference
  • Quick Create
    • About Quick Create Invocation
    • Obtain quick create - model library style parameter data
      POST
    • Initiate Quick Create Task
      POST
  • Integration Example
    • Complete integration example
    • Complete Integration Example – Advanced Edition
    • Task Progress Display Example
    • Full Workflow Integration Example
  • Standard Model API
    • Video Generation & Processing
      • image-to-video
        • Vidu
          • Vidu-image-to-video-q3-pro-fast
          • Vidu-start-end-to-video-q3-pro-fast
          • Vidu-start-end-to-video-q2-turbo
          • Vidu-start-end-to-video-q2-pro
          • Vidu-image-to-video-q2-pro
          • Vidu-image-to-video-q2-turbo
          • Vidu-image-to-video-q3-pro
          • Vidu-image-to-video-q2-pro-fast
          • Vidu-start-end-to-video-q2-pro-fast
          • Vidu-image-to-video-q3-turbo
          • Vidu-start-end-to-video-q3-turbo
          • Vidu-start-end-to-video-q3-pro
        • kling
          • kling-v3.0-pro-image-to-video
          • kling-video-o3-pro/reference-to-video
          • kling-video-o1/image-to-video
          • kling-v3.0-std-image-to-video
          • kling-video-o3-std/reference-to-video
          • kling-video-o1/start-to-end
          • kling-video-o3-pro-image-to-video
          • kling-video-o3-std-image-to-video
          • kling-elements
          • kling-v2.5-turbo-std/image-to-video
          • kling-v2.5-turbo-pro/image-to-video
          • kling-v2.6-pro-image-to-video
        • wan
          • wan-2.7/image-to-video
          • wan-2.2/image-to-video
          • wan-2.6-reference-to-video
          • wan-2.6-reference-to-video-flash
          • wan-2.6-image-to-video-flash
          • wan-2.2-video/start-to-end
        • alibaba
          • wan
            • alibaba/wan-2.6/image-to-video
        • seedance
          • seedance-2.0/image-to-video
          • seedance-2.0-fast/image-to-video
          • seedance-v1.5-pro-image-to-video-fast
          • seedance-v1.5-pro-image-to-video
        • hailuo
          • hailuo-02-i2v-standard
          • hailuo-02-standard
          • hailuo-2.3/i2v-standard
          • hailuo-2.3-fast/image-to-video
          • hailuo-2.3-fast-pro/image-to-video
          • hailuo-2.3/i2v-pro
          • hailuo-02-i2v-pro
          • hailuo-02-fast
        • midjourney
          • midjourney-image-to-video
        • sora
          • sora-2/image-to-video-channel-low-price
          • sora-2/image-to-video-pro-official-stable
          • sora-2/image-to-video-pro-channel-low-price
          • sora-2/text-to-video-pro-official-stable
          • sora-2/image-to-video-realistic-official-stable
          • sora-2/image-to-video-official-stable
          • sora-2/image-to-video-channel-low-price
          • sora-2/image-to-video-pro-channel-low-price
        • xai
          • grok
            • xai/grok-imagine/image-to-video-channel-low-price
            • xai/grok-imagine/image-to-video-official-stable
        • google
          • veo3.1
            • google/veo3.1-pro/start-end-to-video-channel-low-price
            • google/veo3.1-fast/image-to-video-channel-low-price
            • google/veo3.1-fast/image-to-video-official-stable
            • google/veo3.1-pro/image-to-video-official-stable
            • google/veo3.1-fast/start-end-to-video-channel-low-price
            • google/veo3.1-pro/reference-to-video-official-stable
        • skyreels
          • skyreels-v4/image-to-video
        • ltx
          • ltx-2.3/image-to-video
          • ltx-2.3/image-to-video-lora
        • pixverse
          • pixverse-v6/image-to-video
      • reference-to-video
        • wan-2.7-reference-to-video
        • skyreels-v3/reference-to-video
        • Vidu-reference-to-video-q3
        • seedance-2.0/multimodal-video
        • Vidu-reference-to-video-q2
        • seedance-2.0-fast/multimodal-video
        • kling-video-o1-std/refrence-to-video
        • seedance-v1-lite-reference-to-video
        • skyreels-v4/omni-reference
        • Vidu-reference-to-video-q3-mix
      • text-to-video
        • Vidu
          • Vidu-text-to-video-q3-pro-fast
          • Vidu-text-to-video-q2
          • Vidu-text-to-video-q3-pro
          • Vidu-text-to-video-q3-turbo
        • kling
          • kling-video-o3-std-text-to-video
          • kling-video-o1/text-to-video
          • kling-v3.0-pro-text-to-video
          • kling-v3.0-std-text-to-video
          • kling-video-o3-pro-text-to-video
          • kling-v2.5-turbo-pro/text-to-video
          • kling-v2.6-pro-text-to-video
        • alibaba
          • wan
            • alibaba/wan-2.6/text-to-video
        • seedance
          • seedance-2.0/text-to-video
          • seedance-2.0-fast/text-to-video
          • seedance-v1.5-pro-text-to-video-fast
          • seedance-v1.5-pro-text-to-video
        • hailuo
          • hailuo-02-t2v-standard
          • hailuo-02-pro
          • hailuo-2.3-t2v-standard
          • hailuo-2.3-t2v-pro
          • hailuo-02-t2v-pro
        • sora
          • sora-2/text-to-video-channel-low-price
          • sora-2/text-to-video-pro-channel-low-price
          • sora-2/text-to-video-official-stable
          • sora-2/text-to-video-channel-low-price
          • sora-2/text-to-video-pro-channel-low-price
        • xai
          • grok
            • xai/grok-imagine/text-to-video-channel-low-price
            • xai/grok-imagine/text-to-video-official-stable
        • google
          • veo3.1
            • google/veo3.1-pro/text-to-video-channel-low-price
            • google/veo3.1-pro/text-to-video-official-stable
            • google/veo3.1-fast/video-extend-official-stable
            • google/veo3.1-pro/video-extend-official-stable
            • google/veo3.1-fast/text-to-video-channel-low-price
            • google/veo3.1-fast/text-to-video-official-stable
        • cinematic
          • cinematic-video-generator
        • wan
          • wan-2.7/text-to-video
          • wan-2.2/text-to-video
        • skyreels
          • skyreels-v4/text-to-video
        • ltx
          • ltx-2.3/text-to-video
          • ltx-2.3/text-to-video-lora
        • pixverse
          • pixverse-v6/text-to-video
      • video-edit
        • wan-2.7/video-edit
        • kling-video-o3-pro/video-edit
        • kling-video-o3-std/video-edit
        • kling-video-o1-std/edit-video
        • xai/grok-imagine/edit-video-official-stable
      • motion-control
        • kling-v3.0-pro-motion-control
        • kling-v3.0-std-motion-control
        • kling-v2.6-pro-motion-control
        • bytedance/dreamactor-v2
        • kling-v2.6-std-motion-control
      • video-tools
        • sora-upload-character-official
        • pixverse-v6/extend
        • pixverse-v6/transition
        • sora-upload-character-channel-low-price
        • rh-video-upscaler
        • rh-video-fps-increaser
      • video-effects
        • skyreels-v3/video-restyling
      • video-extend
        • wan-2.7/video-extend
        • skyreels-v3/single-shot-video-extension
        • skyreels-v3/shot-switching-video-extension
      • audio-to-video
        • kling-lip-sync/identify-face
        • kling-lip-sync/lip-sync-video
    • 3D Generation & Processing
      • text-to-3D
        • hunyuan3d-v3.1/text-to-3d
      • image-to-3D
        • hitem3d-v15/image-to-3d
        • hunyuan3d-v3.1/image-to-3d
        • hitem3d-v2/image-to-3d
        • hitem3d-v15/multi-image-to-3d
        • hitem3d-v2/multi-image-to-3d
        • hitem3d-portrait-v21/image-to-3d
        • hitem3d-portrait-v21/multi-image-to-3d
        • hitem3d-portrait-v20/image-to-3d
        • hitem3d-portrait-v20/multi-image-to-3d
        • hitem3d-portrait-v15/image-to-3d
        • hitem3d-portrait-v15/multi-image-to-3d
    • Audio Generation & Processing
      • text-to-audio
        • minimax/speech-2.8-hd
        • minimax/music-2.5
        • minimax/speech-02-hd
        • minimax/speech-02-turbo
        • minimax/speech-2.6-hd
        • minimax/speech-2.6-turbo
        • minimax/speech-2.8-turbo
        • minimax/voice-clone
        • kling-lip-sync/tts
    • Image Generation & Processing
      • reference-to-image
        • Vidu-reference-to-video-q2-pro
      • image-to-image
        • midjourney
          • midjourney-text-to-image-niji6
          • midjourney-text-to-image-v61
          • midjourney-text-to-image-v6
          • midjourney-text-to-image-v7
        • seedream
          • seedream-v4.5/image-to-image
          • seedream-v4/image-to-image
          • seedream-v5-lite/image-to-image
        • nano
          • nano-banana2-gemini31flash/image-to-image-channel-low-price
          • nano-banana-pro/edit-channel-low-price
          • nano-banana2-gemini31flash/image-to-image-official-stable
          • nano-banana/edit-channel-low-price
          • nano-banana/edit-official-stable
          • nano-banana-pro/edit-ultra-official-stable
          • nano-banana-pro/edit-official-stable
        • gpt
          • gpt-image-1.5/edit-channel-low-price
        • grok
          • grok-image/image-to-image/channel-low-price
        • qwen
          • qwen-image/edit-2511-lora
          • qwen-image/edit-2511
          • qwen-image-2.0-pro/image-edit
          • qwen-image-2.0/image-edit
        • z
          • z-image-turbo/image-to-image-lora
          • z-image-turbo/image-to-image
        • wan
          • wan-2.2/image-to-image
        • f
          • f-kontext-dev-lora
          • f-2-dev/edit-lora
          • f-2-dev/edit
          • f-2-klein-9b/edit
          • f-2-klein-4b/edit
          • f-2-klein-4b/edit-lora
      • text-to-image
        • midjourney
          • midjourney-text-to-image-niji7
        • nano
          • nano-banana-pro/text-to-image-channel-low-price
          • nano-banana2-gemini31flash/text-to-image-official-stable
          • nano-banana/text-to-image-channel-low-price
          • nano-banana-pro/text-to-image-ultra-official-stable
          • nano-banana2-gemini31flash/text-to-image-channel-low-price
          • nano-banana-pro/text-to-image-official-stable
          • nano-banana/text-to-image-official-stable
        • gpt
          • gpt-image-1.5/text-to-image-official-stable
          • gpt-image-1.5/text-to-image-channel-low-price
          • gpt-image-1.5/image-to-image-official-stable
        • seedream
          • seedream-v4.5/text-to-image
          • seedream-v5-lite/text-to-image
          • seedream-v4/text-to-image
        • grok
          • grok-image/text-to-image/channel-low-price
        • qwen
          • qwen-image/text-to-image-2512
          • qwen-image/text-to-image-2512-lora
          • qwen-image-2.0/text-to-image
          • qwen-image-2.0-pro/text-to-image
        • z
          • z-image/turbo-lora
          • z-image/turbo
        • wan
          • wan-2.2/text-to-image-lora
          • wan-2.7/text-to-image-pro
          • wan-2.7/text-to-image
        • f
          • f-krea-dev-lora
          • f-dev-lora
          • f-2-dev/text-to-image-lora
          • f-2-dev/text-to-image
          • f-2-klein-9b/text-to-image-lora
          • f-2-klein-9b/text-to-image
          • f-2-klein-4b/text-to-image
          • f-2-klein-4b/text-to-image-lora
          • f-dev
      • image-tools
        • wan-2.7/image-edit-pro
        • wan-2.7/image-edit
    • Other
      • pixverse-v5.6/text-to-video
      • pixverse-v5.6/image-to-video
  • Task Query & webhook
    • Check Task Status
      POST
    • Check Task Output
      POST
    • Get Webhook Event Details
      POST
    • Resend Specific Webhook Event
      POST
    • Query generation result (V2)
      POST
  • ComfyUI Workflows
    • Start ComfyUI Task 1 - Basic
      POST
    • Start ComfyUI Task 2 - Advanced
      POST
    • Get Workflow JSON
      POST
    • Cancel ComfyUI Task
      POST
  • AI App
    • Start AI App Task
    • Get API call examples for AI application
  • Resource Upload
    • File Upload (New)
    • Upload Resource(image\video\audio\Compressed Files)
    • Upload Lora
  • Get Account Information
    POST
  • Schemas
    • Get Workflow JSON Request
    • TaskRunWebappByKeyRequest
    • Generate task submission results
    • Get Workflow JSON Response
    • Start ComfyUI Task Request 1
    • Start ComfyUI Task Request 2
    • Start ComfyUI Task Request -webhook
    • Start ComfyUI Task Response
    • TaskCreateResponse
    • Check Task Status Request
    • Node Input Information
    • Get Account Information Request
    • Upload Resource Request
    • Get Webhook Event Details Request
    • Resend Specific Webhook Request
    • R?
    • RWorkflowDuplicateResponse
    • RAccountStatusResponse
    • WorkflowDuplicateResponse
    • AccountStatusResponse
    • WorkflowDuplicateRequest
    • ApiUploadLoraRequest
    • RString
    • RTaskUploadResponse
    • TaskUploadResponse
  1. Integration Example

Complete Integration Example – Advanced Edition

RunningHub AI Application Interface (Advanced Interactive Script) User Manual

1. System Requirements and Operating Environment

1.1 Server Environment

Component | Requirement
Operating System | Windows 10/11, Linux (Ubuntu 20.04+), macOS
Python Version | Python 3.8+
Dependencies | pip install flask requests
Network | Server must have access to www.runninghub.cn for API calls and file uploads/downloads

1.2 Frontend Environment

Item | Requirement
Browser | Recommended: Chrome, Edge, Firefox latest versions
Local Display | Frontend is HTML + JS, accessed via Flask HTTP service
File Upload | Images, audio, video, ZIP files must be smaller than server allowed size (platform requirement <30MB)

1.3 Project Directory Structure

project/
│
├── app.py              # Flask backend script
├── index.html          # Frontend interface
└── uploads/            # Optional: for storing uploaded files (managed dynamically by Flask)

2. Backend Configuration and Startup

2.1 Configure API Key and WebAppId

  • API Key: Provided by RunningHub for API authentication
  • WebAppId: The AI application instance ID created on RunningHub

2.2 Start Flask Service

Open a terminal, navigate to the project directory, and run:

python app.py

By default, Flask listens on 0.0.0.0:5000. Open in browser:

http://localhost:5000/find

This will load the frontend interface index.html.


3. Frontend Interface Instructions

Frontend Interface Screenshot

3.1 Input API Key and WebAppId

  1. Enter at the top of the page:

    • API Key: Key provided by RunningHub
    • WebAppId: AI application instance ID
  2. Click Get Node Info:

    • Frontend sends request to /get_node_info
    • Backend returns node list and cover images
  3. Nodes are displayed on the left, covers on the right

3.2 Node Operations

Node Types

Node Type | Operation
STRING | Text input box, enter string
LIST | Dropdown selection from options
IMAGE / AUDIO / VIDEO | Media upload and preview

Node Actions

  • Text Input: Type directly in textarea

  • Dropdown Selection: Choose option from dropdown

  • File Upload:

    1. Click Upload File button
    2. Select local file (supports IMAGE/AUDIO/VIDEO/ZIP)
    3. After upload, preview appears automatically and updates the node's fieldValue

3.3 Run AI Application

  • Once all nodes are filled, click Run AI Application button
  • Frontend sends node info (nodeInfoList2) to /save_nodes
  • Backend submits task to RunningHub
  • Task status and results are displayed on the right

File Preview and Download

File Type | Preview Method
Image | Display preview
Audio/Video | Play directly
Other files | Provide download link

4. Backend API Functions

4.1 /get_node_info

Item | Description
Method | POST
Parameters | { "apiKey": "<your_api_key>", "webappId": "<your_webapp_id>" }
Function | Get current AI application's node list and cover images
Return | Node list (nodeInfoList) and cover images (covers)

4.2 /upload_file

Item | Description
Method | POST
Form Fields | file: file to upload; fileType: type (image/audio/video/input)
Function | Upload file to RunningHub and return file path
Return | { "success": true, "thirdPartyResponse": {...} }

4.3 /save_nodes

Item | Description
Method | POST
Parameters | { "webappId": "<your_webapp_id>", "nodeInfoList2": [...], "apiKey": "<your_api_key>" }
Function | Submit AI task to RunningHub, poll task status, and return results
Status | Running, Queued, Success, Failed

4.4 /find

Directly returns index.html page for browser access


5. File Upload and Download Notes

  • Frontend uploads files using /upload_file to the third-party server

  • Supported file types:

    • IMAGE: jpg, png, webp
    • AUDIO: mp3, wav
    • VIDEO: mp4, avi, mov
    • ZIP or other types: provided via download link
  • After upload, node's fieldValue is updated with the file path


6. FAQs and Troubleshooting

Issue | Possible Cause | Solution
Cannot get node info | API Key or WebAppId incorrect | Verify input and retry
File upload fails | Network issues or unsupported file type | Check network and file type
Task takes too long | Backend polling timeout or busy service | Increase timeout or retry
File preview does not display | Browser unsupported or URL incorrect | Check file type or download to view

7. Tips and Suggestions

  • 📷 Image Upload: Compress to 1-5MB for faster uploads
  • 🎬 Video Handling: Keep under 30MB (the platform upload limit) for smoother preview
  • 💾 File Management: Check file links in the results panel after submission for backup
  • ⚡ LIST Nodes: Choosing correct option affects AI output

✅ Summary

The advanced automation script interface provides a complete workflow:

  • 🔄 Dynamic node rendering
  • 📤 File upload and preview
  • 🚀 AI task submission and polling
  • 📥 Generated file display and download

The system frontend renders nodes via HTML/JS, backend uses Flask API to connect with RunningHub. Users only need to configure API Key and WebAppId to run the full workflow.
backend code

from flask import Flask, request, jsonify, send_from_directory
import os
import http.client
import mimetypes
import json
import time
import requests

app = Flask(__name__)

# RunningHub endpoint configuration used for file uploads.
THIRD_PARTY_HOST = "www.runninghub.cn"
THIRD_PARTY_PATH = "/task/openapi/upload"
WEBAPP_ID = ""
# NOTE(review): a single module-level mutable key shared by every request —
# fine for a single-user demo, but concurrent users would overwrite each
# other's credentials.
API_KEY = ""  # Global API key, will be updated by frontend

# Serve index.html from current directory
@app.route('/find')
def index():
    return send_from_directory(os.getcwd(), 'index.html')


API_HOST = "www.runninghub.cn"  # Host for task submission and status queries


# 1️⃣ Submit a task
def submit_task(webapp_id, node_info_list):
    """Submit an AI-app task to RunningHub.

    Args:
        webapp_id: The AI application instance ID.
        node_info_list: List of node dicts (nodeId, fieldName, fieldValue, ...).

    Returns:
        Parsed JSON response dict from the API; on success the caller expects
        {"code": 0, "data": {"taskId": ...}}.
    """
    conn = http.client.HTTPSConnection(API_HOST)
    payload = json.dumps({
        "webappId": webapp_id,
        "apiKey": API_KEY,  # module-level key set by the frontend endpoints
        "nodeInfoList": node_info_list
    })
    headers = {
        'Host': API_HOST,
        'Content-Type': 'application/json'
    }
    try:
        conn.request("POST", "/task/openapi/ai-app/run", payload, headers)
        res = conn.getresponse()
        data = json.loads(res.read().decode("utf-8"))
    finally:
        # Original leaked the connection if request/read/parse raised.
        conn.close()
    return data


# 3️⃣ Query task output results
def query_task_outputs(task_id):
    """Query the output/status of a previously submitted task.

    Args:
        task_id: Task ID returned by submit_task.

    Returns:
        Parsed JSON response dict; callers interpret "code" as
        0 = done, 804 = running, 813 = queued, 805 = failed.
    """
    conn = http.client.HTTPSConnection(API_HOST)
    payload = json.dumps({
        "apiKey": API_KEY,
        "taskId": task_id
    })
    headers = {
        'Host': API_HOST,
        'Content-Type': 'application/json'
    }
    try:
        conn.request("POST", "/task/openapi/outputs", payload, headers)
        res = conn.getresponse()
        data = json.loads(res.read().decode("utf-8"))
    finally:
        # Original leaked the connection if request/read/parse raised.
        conn.close()
    return data


# Receive frontend request for node info
@app.route("/get_node_info", methods=["POST"])
def get_node_info():
    """Fetch an AI application's node list and cover images from RunningHub.

    Expects JSON: {"apiKey": "...", "webappId": "..."}.
    Proxies GET /api/webapp/apiCallDemo and returns its JSON payload, or
    {"success": False, "message": ...} (HTTP 400 on missing parameters).
    """
    from urllib.parse import urlencode  # stdlib; local import keeps file header untouched

    global API_KEY
    req = request.get_json()
    webapp_id = req.get("webappId")
    api_key = req.get("apiKey")  # API key sent from frontend

    # Validate first — the original clobbered the global key (and printed the
    # secret to the log) before checking the request was complete.
    if not api_key or not webapp_id:
        return jsonify({"success": False, "message": "Missing apiKey or webappId"}), 400

    API_KEY = api_key  # Update global API_KEY for later upload/submit calls

    try:
        conn = http.client.HTTPSConnection("www.runninghub.cn")
        # URL-encode the query so keys/ids containing special characters survive.
        url = f"/api/webapp/apiCallDemo?{urlencode({'apiKey': api_key, 'webappId': webapp_id})}"
        try:
            conn.request("GET", url, headers={})
            res = conn.getresponse()
            data = res.read()
        finally:
            conn.close()  # always release the connection, even on failure

        # Parse JSON
        try:
            result = json.loads(data.decode("utf-8"))
        except ValueError:
            result = {"success": False, "message": "Non-JSON response from third party", "data": data.decode("utf-8")}

        return jsonify(result)

    except Exception as e:
        return jsonify({"success": False, "message": str(e)})


# File upload endpoint
@app.route("/upload_file", methods=["POST"])
def upload_file():
    """Forward an uploaded file to the RunningHub upload API.

    Form fields: "file" (the file), "fileType" (image/audio/video/input,
    defaults to "input"). Returns {"success": True, "thirdPartyResponse": ...}
    on completion, or {"success": False, "message": ...} when no file was sent
    or the upstream request failed.
    """
    file = request.files.get('file')
    if not file:
        return jsonify({"success": False, "message": "No file received"})

    file_type = request.form.get('fileType', 'input')

    url = "https://www.runninghub.cn/task/openapi/upload"
    headers = {'Host': 'www.runninghub.cn'}
    data = {'apiKey': API_KEY, 'fileType': file_type}

    files = {'file': (file.filename, file.stream, file.content_type)}

    try:
        # Original had no timeout: a stalled upload would hang the worker
        # forever, and connection errors surfaced as unhandled 500s.
        response = requests.post(url, headers=headers, files=files, data=data,
                                 timeout=120)
    except requests.RequestException as e:
        return jsonify({"success": False, "message": str(e)})

    # Try parsing third-party JSON response; fall back to raw text.
    try:
        third_party_data = response.json()
    except ValueError:
        third_party_data = response.text

    return jsonify({"success": True, "thirdPartyResponse": third_party_data})


# Receive frontend nodeInfoList2 and submit task
@app.route("/save_nodes", methods=["POST"])
def save_nodes():
    """Submit an AI task to RunningHub and poll until it finishes.

    Expects JSON: {"webappId": ..., "nodeInfoList2": [...], "apiKey": ...}.
    Polls /task/openapi/outputs every 5 s for up to 10 minutes and returns a
    JSON object with success flag, taskId and the generated file data.
    """
    global API_KEY  # declared once up front; applies to the whole function
    req = request.get_json()
    node_info_list = req.get("nodeInfoList2")
    webapp_id = req.get("webappId")
    api_key = req.get("apiKey")  # API key from frontend
    if api_key:
        API_KEY = api_key  # Update global API_KEY

    # Validate before contacting the third-party API. The original never
    # checked webappId and would submit a task with webappId=None.
    if not node_info_list:
        return jsonify({"success": False, "message": "nodeInfoList2 is empty"}), 400
    if not webapp_id:
        return jsonify({"success": False, "message": "webappId is missing"}), 400

    try:
        # Submit task
        submit_result = submit_task(webapp_id, node_info_list)
        if submit_result.get("code") != 0:
            return jsonify({"success": False, "message": "Task submission failed", "data": submit_result})

        task_id = submit_result["data"]["taskId"]

        # Poll for task execution result
        start_time = time.time()
        timeout = 600  # 10 minutes
        while True:
            outputs_result = query_task_outputs(task_id)
            code = outputs_result.get("code")
            msg = outputs_result.get("msg")
            data = outputs_result.get("data")

            # Success: code 0 with a non-empty data payload
            if code == 0 and data:
                print("🎉 Task completed successfully!")
                print(data)
                return {
                    "success": True,
                    "fileUrl": data,
                    "taskId": task_id,
                    "message": msg or "success"
                }

            # Task failed (API error code 805)
            elif code == 805:
                failed_reason = data.get("failedReason") if data else None
                print("❌ Task failed!")
                if failed_reason:
                    print(f"Node {failed_reason.get('node_name')} failure reason: {failed_reason.get('exception_message')}")
                    print("Traceback:", failed_reason.get("traceback"))
                return {
                    "success": False,
                    "message": "Task execution failed",
                    "data": outputs_result
                }

            # Running (804) or queued (813) — keep polling
            elif code in (804, 813):
                status_text = "running" if code == 804 else "queued"
                print(f"⏳ Task {status_text}...")

            else:
                print("⚠️ Unknown status:", outputs_result)

            # Timeout check
            if time.time() - start_time > timeout:
                print("⏰ Timeout exceeded (10 minutes), task not completed.")
                return {
                    "success": False,
                    "message": "Timeout exceeded (10 minutes)",
                    "data": outputs_result
                }

            time.sleep(5)

    except Exception as e:
        return jsonify({"success": False, "message": str(e)})


if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug interactive debugger;
    # do not expose 0.0.0.0:5000 with debug on outside a trusted dev network.
    app.run(host="0.0.0.0", port=5000, debug=True)

frontend code

<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <title>Node Info Dynamic Rendering Example</title>
  <style>
    /* Page shell: flex row — node cards on the left, cover/results on the right. */
    body {
      font-family: "Microsoft YaHei", sans-serif;
      margin: 0;
      padding: 20px;
      background: #f8f8f8;
      display: flex;
      justify-content: space-between;
      align-items: flex-start;
      box-sizing: border-box;
    }

    /* Two columns, each 50% width */
    #container {
      width: 50%;
      padding-right: 20px;
      box-sizing: border-box;
    }
    #cover {
      width: 50%;
      display: flex;
      justify-content: center;
      align-items: flex-start;
      position: sticky;
      top: 20px;
    }

    #cover img {
      width: 90%;
      border-radius: 12px;
      box-shadow: 0 4px 12px rgba(0,0,0,0.15);
      background-color: #fff;
    }

    /* Card for each dynamically rendered node. */
    .node {
      background: #fff;
      border-radius: 10px;
      padding: 15px;
      margin-bottom: 20px;
      box-shadow: 0 2px 5px rgba(0,0,0,0.1);
    }

    .node h3 {
      margin: 0 0 10px;
      color: #333;
    }

    label {
      font-weight: bold;
      display: block;
      margin-top: 8px;
    }

    select, input[type=text], textarea {
      width: 100%;
      padding: 8px;
      border: 1px solid #ccc;
      border-radius: 6px;
      margin-top: 5px;
    }

    textarea {
      resize: vertical;
      min-height: 80px;
    }

    .desc {
      color: #666;
      font-size: 14px;
      margin-top: 4px;
    }

    /* Dashed drop-zone box used for media upload previews. */
    .image-box {
      position: relative;
      width: 100%;
      max-width: 300px;
      border: 2px dashed #ccc;
      border-radius: 8px;
      overflow: hidden;
      background-color: #f0f0f0;
      display: flex;
      justify-content: center;
      align-items: center;
      cursor: pointer;
      transition: all 0.3s;
      margin-top: 10px;
    }
    .image-box:hover { border-color: #66aaff; }
    .image-box img { width: 100%; height: 100%; object-fit: cover; display: block; }

    /* "Run AI Application" button; the .saved state lightens it after submit. */
    #saveBtn {
      background-color: #4a90e2; /* Dark blue */
      color: #fff;
      padding: 10px 20px;
      border: none;
      border-radius: 6px;
      cursor: pointer;
      font-size: 16px;
      transition: background-color 0.3s;
    }

    #saveBtn.saved {
      background-color: #a0c4ff; /* Light blue */
    }
  </style>
</head>
<body>
  <!-- Top controls: API Key and WebAppId input -->
  <div id="controls" style="margin-bottom: 20px;">
    <label>API Key: </label>
    <input type="text" id="apiKeyInput" placeholder="Enter API Key" style="width: 260px;" />
    <br><br>
    <label>WebAppId: </label>
    <input type="text" id="webappIdInput" placeholder="Enter WebAppId" style="width: 260px;" />
    <button id="fetchBtn">Fetch Node Info</button>
  </div>

  <!-- Left container: node cards are rendered here by renderNodeInfoList() -->
  <div id="container"></div>

  <!-- Right container: cover + results -->
  <div id="rightSide" style="display: flex; flex-direction: column; width: 50%; align-items: center;">
    <!-- Cover image -->
    <div id="cover" style="position: relative; width: 300px;">
      <span style="
        position: absolute;
        top: 8px;
        left: 8px;
        background-color: rgba(0, 0, 0, 0.6);
        color: #fff;
        font-size: 14px;
        padding: 4px 8px;
        border-radius: 4px;
      ">Cover Image</span>
      <img src="" alt="Cover" style="width: 100%; border-radius: 8px;" />
    </div>

    <!-- Result information -->
    <div id="resultBox" style="margin-top: 12px; text-align: left; width: 300px;"></div>
  </div>

<script>
const defaultImage = "";
let API_KEY = ""; // Will be set from input
let currentWebAppId = ""; // Global WebAppId
const coverImg = document.querySelector("#cover img");
const nodeInfoList = []; // Mutated in place by the fetch handler (length = 0, then push)
const container = document.getElementById("container");
let currentAspectRatio = "1:1"; // Updated when an "aspect_ratio" LIST node changes
let imageBox = null; // Media preview box resized to match currentAspectRatio
// Fetch node info from backend
document.getElementById("fetchBtn").addEventListener("click", async () => {
    API_KEY = document.getElementById("apiKeyInput").value.trim();
    currentWebAppId = document.getElementById("webappIdInput").value.trim();

    // Validate both credentials up front; the original only checked WebAppId,
    // so an empty API key fell through to a confusing
    // "API request failed: undefined" alert from the backend's 400 response.
    if (!API_KEY) return alert("Please enter API Key");
    if (!currentWebAppId) return alert("Please enter WebAppId");

    try {
        const response = await fetch("/get_node_info", {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({
                apiKey: API_KEY,
                webappId: currentWebAppId
            })
        });
        const result = await response.json();

        if (result.code !== 0) return alert("API request failed: " + result.msg);

        // Replace the global node list in place — other code holds a reference
        // to the same array, so reassignment would break it.
        nodeInfoList.length = 0;
        nodeInfoList.push(...(result.data.nodeInfoList || []));

        // Show the first cover thumbnail, if any.
        if (result.data?.covers?.length > 0) {
            coverImg.src = result.data.covers[0].thumbnailUri;
        } else {
            coverImg.src = "";
        }

        renderNodeInfoList();
        addRunButton();
    } catch (err) {
        console.error(err);
        alert("Request error, check console");
    }
});

// Render nodes dynamically
function renderNodeInfoList() {
    container.innerHTML = "";
    nodeInfoList.forEach(node => {
        const div = document.createElement("div");
        div.className = "node";

        const header = document.createElement("h3");
        header.textContent = `nodeName: ${node.nodeName} (nodeId: ${node.nodeId})`;
        div.appendChild(header);

        const desc = document.createElement("label");
        desc.textContent = `Description: ${node.description} (DescriptionEn: ${node.descriptionEn})`;
        div.appendChild(desc);

        const label = document.createElement("label");
        label.textContent = `fieldName: ${node.fieldName} (fieldType: ${node.fieldType})`;
        div.appendChild(label);

        const fieldInfo = document.createElement("label");
        fieldInfo.textContent = `fieldType: ${node.fieldType} (if LIST type, check fieldData for options)`;
        div.appendChild(fieldInfo);

        const valueLabel = document.createElement("label");
        valueLabel.textContent = `fieldValue: ${''}`;
        div.appendChild(valueLabel);

        if (node.fieldType === "LIST") {
            let options = [];
            try {
                options = JSON.parse(node.fieldData);
            } catch (e) { console.error(e); }

            const select = document.createElement("select");
            options.forEach(opt => {
                if (opt.name && opt.index) {
                    const option = document.createElement("option");
                    option.value = opt.index;
                    option.textContent = `${opt.name} - ${opt.description || ''}`;
                    if (opt.index === node.fieldValue) option.selected = true;
                    select.appendChild(option);
                }
            });
            div.appendChild(select);

            select.addEventListener("change", () => {
                if (node.fieldName === "aspect_ratio") {
                    currentAspectRatio = select.value;
                    if (imageBox) updateImageBoxRatio(imageBox, currentAspectRatio);
                }
            });

        } else if (node.fieldType === "STRING") {
            const textarea = document.createElement("textarea");
            textarea.value = node.fieldValue || "";
            div.appendChild(textarea);

            textarea.addEventListener("input", () => {
                node.fieldValue = textarea.value;
            });
        } else if (["IMAGE", "AUDIO", "VIDEO"].includes(node.fieldType)) {
            const mediaBox = document.createElement("div");
            mediaBox.className = "media-box";
            mediaBox.style.marginTop = "8px";
            mediaBox.style.display = "flex";
            mediaBox.style.alignItems = "center";
            mediaBox.style.gap = "10px";

            let previewElement;
            if (node.fieldType === "IMAGE") {
                previewElement = document.createElement("img");
                previewElement.src = node.fieldValue || defaultImage;
                previewElement.alt = node.fieldName;
                previewElement.style.width = "200px";
                previewElement.style.borderRadius = "8px";
            } else if (node.fieldType === "AUDIO") {
                previewElement = document.createElement("audio");
                previewElement.controls = true;
                if (node.fieldValue) previewElement.src = node.fieldValue;
                previewElement.style.width = "200px";
            } else if (node.fieldType === "VIDEO") {
                previewElement = document.createElement("video");
                previewElement.controls = true;
                if (node.fieldValue) previewElement.src = node.fieldValue;
                previewElement.style.width = "240px";
                previewElement.style.borderRadius = "8px";
            }

            mediaBox.appendChild(previewElement);

            const upload = document.createElement("input");
            upload.type = "file";
            if (node.fieldType === "IMAGE") upload.accept = "image/*";
            if (node.fieldType === "AUDIO") upload.accept = "audio/*";
            if (node.fieldType === "VIDEO") upload.accept = "video/*";
            upload.style.display = "none";

            const uploadButton = document.createElement("button");
            uploadButton.textContent = "Upload File";
            uploadButton.addEventListener("click", () => upload.click());

            upload.addEventListener("change", async e => {
                const file = e.target.files[0];
                if (!file) return;

                const reader = new FileReader();
                reader.onload = ev => {
                    previewElement.src = ev.target.result;
                };
                reader.readAsDataURL(file);

                const formData = new FormData();
                formData.append("file", file);
                formData.append("fileType", node.fieldType.toLowerCase());

                try {
                    const response = await fetch("/upload_file", {
                        method: "POST",
                        body: formData
                    });
                    const result = await response.json();

                    if (result.success) {
                        const data = result.thirdPartyResponse;
                        if (data.code === 0 && data.data && data.data.fileName) {
                            node.fieldValue = data.data.fileName;
                        }
                    } else {
                        alert("Upload failed: " + result.message);
                    }
                } catch (err) {
                    console.error("Upload error:", err);
                    alert("Upload error, check console");
                }
            });

            mediaBox.appendChild(uploadButton);
            div.appendChild(mediaBox);
            div.appendChild(upload);
        }

        container.appendChild(div);
    });
}

// Add run button
function addRunButton() {
    const saveBtn = document.createElement("button");
    saveBtn.id = "saveBtn";
    saveBtn.textContent = "Run AI App";
    container.appendChild(saveBtn);

    saveBtn.addEventListener("click", async () => {
        const nodeInfoList2 = nodeInfoList.map(node => {
            let updatedValue = node.fieldValue;
            const div = Array.from(container.querySelectorAll(".node")).find(d =>
                d.querySelector("h3")?.textContent.includes(node.nodeName)
            );

            if (div) {
                if (node.fieldType === "LIST") {
                    const select = div.querySelector("select");
                    if (select) updatedValue = select.value;
                } else if (node.fieldType === "STRING") {
                    const textarea = div.querySelector("textarea");
                    if (textarea) updatedValue = textarea.value;
                } else if (["IMAGE", "AUDIO", "VIDEO"].includes(node.fieldType)) {
                    updatedValue = node.fieldValue;
                }
            }

            const nodeObj = {
                nodeId: node.nodeId,
                fieldName: node.fieldName,
                fieldValue: updatedValue,
                description: node.description || ""
            };

            if (node.fieldType === "LIST") {
                nodeObj.fieldData = node.fieldData;
            }

            return nodeObj;
        });

        console.log("nodeInfoList2:", nodeInfoList2);

        try {
            const response = await fetch("/save_nodes", {
                method: "POST",
                headers: { "Content-Type": "application/json" },
                body: JSON.stringify({
                    nodeInfoList2,
                    webappId: currentWebAppId
                })
            });

            const result = await response.json();
            console.log("Backend response:", result);

            const coverDiv = document.getElementById("cover");
            let resultBox = document.getElementById("resultBox");
            if (!resultBox) {
                resultBox = document.createElement("div");
                resultBox.id = "resultBox";
                resultBox.style.marginTop = "12px";
                resultBox.style.textAlign = "left";
                coverDiv.appendChild(resultBox);
            }

            if (result.success) {
                const files = result.thirdPartyResponse?.data || result.data || result.fileUrl;

                if (Array.isArray(files) && files.length > 0) {
                    let html = `<p><strong>Task ID:</strong> ${result.taskId || "-"}</p>`;
                    html += `<p><strong>Results:</strong></p>`;

                    files.forEach((file, i) => {
                        html += `<div style="margin-bottom:12px;">`;
                        const type = file.fileType?.toLowerCase() || "";

                        if (["png", "jpg", "jpeg", "webp"].includes(type)) {
                            html += `<img src="${file.fileUrl}" alt="Generated Image ${i + 1}"
                                style="max-width:100%; border:1px solid #ccc; border-radius:6px; margin-top:8px;" />`;
                        } else if (["mp4", "mov", "avi"].includes(type)) {
                            html += `<video controls src="${file.fileUrl}"
                                style="max-width:100%; border-radius:6px; margin-top:8px;"></video>`;
                        } else if (["mp3", "wav"].includes(type)) {
                            html += `<audio controls src="${file.fileUrl}"
                                style="width:100%; margin-top:8px;"></audio>`;
                        } else {
                            html += `<a href="${file.fileUrl}" target="_blank">Download File ${i + 1}</a>`;
                        }

                        html += `<p><a href="${file.fileUrl}" target="_blank">👉 Open Original File (${type})</a></p>`;
                        html += `</div>`;
                    });

                    resultBox.innerHTML = html;
                } else {
                    resultBox.innerHTML = `<p style="color:red;">No generated files detected.</p>`;
                }

            } else {
                resultBox.innerHTML = `<p style="color:red;">Task submission failed: ${result.message || "Unknown error"}</p>`;
            }

        } catch (err) {
            console.error("Error saving to backend:", err);
            const coverDiv = document.getElementById("cover");
            let resultBox = document.getElementById("resultBox");
            if (!resultBox) {
                resultBox = document.createElement("div");
                resultBox.id = "resultBox";
                resultBox.style.marginTop = "12px";
                resultBox.style.textAlign = "left";
                coverDiv.appendChild(resultBox);
            }
            resultBox.innerHTML = `<p style="color:red;">Request error, check console log.</p>`;
        }
    });
}

/**
 * Apply an aspect-ratio string to a preview box's CSS.
 *
 * @param {HTMLElement} box - element whose style.aspectRatio is updated
 * @param {string} ratioStr - "W:H" ratio (e.g. "16:9"), "match_input_image",
 *   or a falsy value; the latter two reset the box to automatic sizing.
 *
 * Strings that do not parse to two positive finite numbers are ignored —
 * previously malformed input produced invalid CSS like "NaN / NaN" or
 * a zero ratio.
 */
function updateImageBoxRatio(box, ratioStr) {
    if (!ratioStr || ratioStr === "match_input_image") {
        box.style.aspectRatio = "auto";
    } else if (ratioStr.includes(":")) {
        const [w, h] = ratioStr.split(":").map(Number);
        // Only apply well-formed, positive ratios; skip otherwise.
        if (Number.isFinite(w) && Number.isFinite(h) && w > 0 && h > 0) {
            box.style.aspectRatio = `${w} / ${h}`;
        }
    }
}
</script>
</body>
</html>
Modified at 2026-03-10 12:15:41
Previous
Complete integration example
Next
Task Progress Display Example
Built with