inferencing-llm / app.py
Shyamnath's picture
Simplify app.py to use proxy_app directly
f075c85
raw
history blame contribute delete
284 Bytes
from fastapi import FastAPI  # kept for callers that may import FastAPI from this module
from litellm.proxy.proxy_server import app as proxy_app

# Expose the LiteLLM proxy's ASGI application directly as this module's `app`.
# The previous version built a separate FastAPI(title=..., version=...) instance
# and then immediately discarded it by rebinding `app = proxy_app`, silently
# throwing away the intended OpenAPI metadata. Instead, apply that metadata to
# the proxy app itself so /docs and the OpenAPI schema show the right info.
app = proxy_app
app.title = "LiteLLM API"
app.version = "1.0.0"

if __name__ == "__main__":
    # Run the ASGI app with uvicorn when executed as a script.
    # Port 7860 is the conventional Hugging Face Spaces port.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)