Description
Routine checks
- I have confirmed that there is no existing similar issue
- I have read the project README and the project documentation in full
- I am using my own key and have confirmed that it works correctly
- I understand and am willing to follow up on this issue, assist with testing, and provide feedback
- I understand and accept the above, and I understand that the maintainers' time is limited; issues that do not follow the rules may be ignored or closed directly
Your version
- Public cloud version
- Self-hosted version, version number: v4.9.14
Problem description, log screenshots, config files, etc.
Problem description: when I had just finished deploying, uploading images worked and the model replied normally. Now the upload itself still succeeds, but when the model replies it fails with the error: 400 Invalid image input
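Since "Invalid image input" points at the image payload itself being rejected, one quick local check (not part of the original report; the file path below is a placeholder) is to confirm that the file being uploaded is a valid, decodable image and to note its format and size, since some providers reject unsupported formats or oversized images:

from PIL import Image  # pip install pillow
import os

IMAGE_PATH = "test.png"  # placeholder: the image that triggers the 400

img = Image.open(IMAGE_PATH)
img.verify()  # raises an exception if the file is truncated or not a valid image
print(f"format={img.format}, size={img.size}, bytes={os.path.getsize(IMAGE_PATH)}")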
The FastGPT log screenshot is as follows:

The docker-compose.yaml file is as follows:
version: '3.3'
services:
  pg:
    image: pgvector/pgvector:0.8.0-pg15 # docker hub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # Alibaba Cloud
    container_name: pg
    restart: always
    networks:
      - fastgpt
    environment:
      - POSTGRES_USER=username
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=postgres
    volumes:
      - ./pg/data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'username', '-d', 'postgres']
      interval: 5s
      timeout: 5s
      retries: 10
  mongo:
    image: mongo:5.0.18 # dockerhub
    container_name: mongo
    restart: always
    networks:
      - fastgpt
    command: mongod --keyFile /data/mongodb.key --replSet rs0
    environment:
      - MONGO_INITDB_ROOT_USERNAME=myusername
      - MONGO_INITDB_ROOT_PASSWORD=mypassword
    volumes:
      - ./mongo/data:/data/db
    entrypoint:
      - bash
      - -c
      - |
        openssl rand -base64 128 > /data/mongodb.key
        chmod 400 /data/mongodb.key
        chown 999:999 /data/mongodb.key
        echo 'const isInited = rs.status().ok === 1
        if(!isInited){
          rs.initiate({
            _id: "rs0",
            members: [
              { _id: 0, host: "mongo:27017" }
            ]
          })
        }' > /data/initReplicaSet.js
        exec docker-entrypoint.sh "$$@" &
        until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
          echo "Waiting for MongoDB to start..."
          sleep 2
        done
        mongo -u myusername -p mypassword --authenticationDatabase admin /data/initReplicaSet.js
        wait $$!
  redis:
    image: redis:7.2-alpine
    container_name: redis
    networks:
      - fastgpt
    restart: always
    command: |
      redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
    healthcheck:
      test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 30s
    volumes:
      - ./redis/data:/data
  sandbox:
    container_name: sandbox
    image: ghcr.io/labring/fastgpt-sandbox:v4.9.14 # git
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.14 # git
    ports:
      - 3005:3000
    networks:
      - fastgpt
    restart: always
    environment:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
    image: ghcr.io/labring/fastgpt:v4.9.14 # git
    ports:
      - 3000:3000
    networks:
      - fastgpt
    depends_on:
      - mongo
      - pg
      - sandbox
    restart: always
    environment:
      - FE_DOMAIN=http://172.17.0.1:3000
      - DEFAULT_ROOT_PSW=1234
      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
      - AIPROXY_API_TOKEN=aiproxy
      - DB_MAX_LINK=30
      - TOKEN_KEY=any
      - ROOT_KEY=root_key
      - FILE_TOKEN_KEY=filetoken
      - MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
      - PG_URL=postgresql://username:password@pg:5432/postgres
      - REDIS_URL=redis://default:mypassword@redis:6379
      - SANDBOX_URL=http://sandbox:3000
      - LOG_LEVEL=info
      - STORE_LOG_LEVEL=warn
      - WORKFLOW_MAX_RUN_TIMES=1000
      - WORKFLOW_MAX_LOOP_TIMES=100
      - ALLOWED_ORIGINS=
      - USE_IP_LIMIT=false
      - CHAT_FILE_EXPIRE_TIME=7
    volumes:
      - ./config.json:/app/data/config.json
  aiproxy:
    image: ghcr.io/labring/aiproxy:v0.1.7
    container_name: aiproxy
    restart: unless-stopped
    depends_on:
      aiproxy_pg:
        condition: service_healthy
    networks:
      - fastgpt
    environment:
      - ADMIN_KEY=aiproxy
      - LOG_DETAIL_STORAGE_HOURS=1
      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
      - RETRY_TIMES=3
      - BILLING_ENABLED=false
      - DISABLE_MODEL_CONFIG=true
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
      interval: 5s
      timeout: 5s
      retries: 10
  aiproxy_pg:
    image: pgvector/pgvector:0.8.0-pg15 # docker hub
    restart: unless-stopped
    container_name: aiproxy_pg
    volumes:
      - ./aiproxy_pg:/var/lib/postgresql/data
    networks:
      - fastgpt
    environment:
      TZ: Asia/Shanghai
      POSTGRES_USER: postgres
      POSTGRES_DB: aiproxy
      POSTGRES_PASSWORD: aiproxy
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
      interval: 5s
      timeout: 5s
      retries: 10
networks:
  fastgpt:
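To rule out the web UI, one option is to replay an image request against FastGPT's OpenAI-compatible chat endpoint (/api/v1/chat/completions) on the published port 3000. This is only a sketch: APP_KEY is a placeholder for an app API key created in the FastGPT console, the image path is a placeholder, and the image is sent inline as a base64 data URL.

import base64
import requests  # pip install requests

BASE_URL = "http://localhost:3000/api"  # fastgpt is published on host port 3000 in this compose file
APP_KEY = "fastgpt-xxxxxx"              # placeholder: an app API key from the FastGPT console

# Encode a local test image as a data URL.
with open("test.png", "rb") as f:
    data_url = "data:image/png;base64," + base64.b64encode(f.read()).decode()

resp = requests.post(
    f"{BASE_URL}/v1/chat/completions",
    headers={"Authorization": f"Bearer {APP_KEY}"},
    json={
        "stream": False,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is in this picture?"},
                    {"type": "image_url", "image_url": {"url": data_url}},
                ],
            }
        ],
    },
    timeout=120,
)
print(resp.status_code, resp.text)

If this call reproduces the 400, the failure is on the FastGPT / aiproxy / model-provider path rather than in the browser upload.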
The config.json file is as follows:
{
  "feConfigs": {
    "lafEnv": "https://laf.dev", // laf environment: https://laf.run (Hangzhou, Alibaba Cloud) or a self-hosted laf environment. The latest laf is required for the Laf openapi feature.
    "mcpServerProxyEndpoint": " http://localhost:3005" // mcp server proxy address, e.g. http://localhost:3005
  },
  "systemEnv": {
    "vectorMaxProcess": 10, // number of vector processing threads
    "qaMaxProcess": 10, // number of QA splitting threads
    "vlmMaxProcess": 10, // max concurrent processes for the image understanding (VLM) model
    "tokenWorkers": 30, // number of persistent token-counting workers; they keep memory allocated, so do not set this too high
    "hnswEfSearch": 100, // vector search parameter, only effective for PG and OB. Larger is more accurate but slower; 100 gives 99%+ accuracy.
    "hnswMaxScanTuples": 100000, // maximum number of tuples scanned during vector search, only effective for PG
    "customPdfParse": {
      "url": "http://172.17.0.1:7231/v2/parse/file", // custom PDF parsing service address
      "key": "", // custom PDF parsing service key
      "doc2xKey": "", // doc2x service key
      "price": 0 // PDF parsing service price
    }
  }
}
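Since this config.json contains // comments (as the shipped FastGPT template does), strict JSON tooling will reject it as-is. A quick local sanity check, independent of FastGPT itself, is to strip the comments (without touching "//" inside string values such as URLs) and parse the result:

import json
import re

with open("config.json", encoding="utf-8") as f:
    raw = f.read()

# Remove // comments, but leave "//" inside string literals (e.g. URLs) untouched.
stripped = re.sub(
    r'("(?:\\.|[^"\\])*")|//[^\n]*',
    lambda m: m.group(1) or "",
    raw,
)

config = json.loads(stripped)  # raises an error if the JSON structure itself is broken
print("config.json parsed OK, top-level keys:", list(config))

FastGPT's own loader accepts the commented template, so this check only catches structural problems such as missing commas or brackets.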
Steps to reproduce
Expected result
The model returns its response normally, instead of only returning the portion of the response I specified while the LLM's actual output is never returned.
Related screenshots