package remote_http

import (
	"context"
	"errors"
	"fmt"
	"mylomen_server/common/config"
	"mylomen_server/common/dto"
	"mylomen_server/common/utils"
	"time"
)

// aiOllamaImpl talks to a local Ollama instance over its HTTP API.
type aiOllamaImpl struct{}

// Completions sends a single-turn, non-streaming chat request to Ollama's
// /api/chat endpoint and returns the generated reply. groupId is not used by
// this implementation.
func (aiOllamaImpl) Completions(ctx context.Context, prompt, groupId string) (*dto.AiRes, error) {
	logger := utils.NewLog("")

	url := config.Instance.Rpc.OllamaAddress + "/api/chat"

	var result chatVO
	resp, err := httpClient.R().
		SetContext(ctx). // propagate the caller's context for cancellation/timeouts
		SetBody(map[string]interface{}{
			"model":    "qwen2.5:0.5b",
			"stream":   false, // non-streaming: the full reply arrives in one response body
			"messages": []map[string]interface{}{{"role": "user", "content": prompt}},
		}).
		SetHeaders(map[string]string{
			"Content-Type": "application/json",
		}).
		SetSuccessResult(&result).
		Post(url)
	if err != nil {
		logger.Error(fmt.Sprintf("remote_http_wx_SendMsg error: %s", err.Error()))
		return nil, err
	}

	if !resp.IsSuccessState() {
		logger.Error(fmt.Sprintf("remote_http_wx_SendMsg resp:%+v", resp))
		return nil, errors.New("接口异常")
	}

	return &dto.AiRes{Completions: result.Message.Content}, nil
}
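
// NOTE: the method below is an illustrative sketch, not part of the original
// file. It shows how the otherwise unused generateVO type could back a call to
// Ollama's /api/generate endpoint using the same httpClient/req pattern as
// Completions above. The method name Generate, its signature, and the reuse of
// the qwen2.5:0.5b model are assumptions.
func (aiOllamaImpl) Generate(ctx context.Context, prompt string) (*dto.AiRes, error) {
	logger := utils.NewLog("")

	url := config.Instance.Rpc.OllamaAddress + "/api/generate"

	var result generateVO
	resp, err := httpClient.R().
		SetContext(ctx).
		SetBody(map[string]interface{}{
			"model":  "qwen2.5:0.5b",
			"stream": false,
			"prompt": prompt,
		}).
		SetHeaders(map[string]string{
			"Content-Type": "application/json",
		}).
		SetSuccessResult(&result).
		Post(url)
	if err != nil {
		logger.Error(fmt.Sprintf("remote_http_ai_ollama_Generate error: %s", err.Error()))
		return nil, err
	}

	if !resp.IsSuccessState() {
		logger.Error(fmt.Sprintf("remote_http_ai_ollama_Generate resp:%+v", resp))
		return nil, errors.New("ollama api error")
	}

	// /api/generate returns the generated text in the "response" field.
	return &dto.AiRes{Completions: result.Response}, nil
}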

// generateVO mirrors the non-streaming response body of Ollama's /api/generate
// endpoint. Duration fields are reported in nanoseconds.
type generateVO struct {
	Model              string    `json:"model"`
	CreatedAt          time.Time `json:"created_at"`
	Response           string    `json:"response"`
	Done               bool      `json:"done"`
	DoneReason         string    `json:"done_reason"`
	Context            []int     `json:"context"`
	TotalDuration      int64     `json:"total_duration"`
	LoadDuration       int64     `json:"load_duration"`
	PromptEvalCount    int       `json:"prompt_eval_count"`
	PromptEvalDuration int64     `json:"prompt_eval_duration"`
	EvalCount          int       `json:"eval_count"`
	EvalDuration       int64     `json:"eval_duration"`
}

// chatVO mirrors the non-streaming response body of Ollama's /api/chat
// endpoint. Duration fields are reported in nanoseconds.
type chatVO struct {
	Model     string    `json:"model"`
	CreatedAt time.Time `json:"created_at"`
	Message   struct {
		Role    string `json:"role"`
		Content string `json:"content"`
	} `json:"message"`
	DoneReason         string `json:"done_reason"`
	Done               bool   `json:"done"`
	TotalDuration      int64  `json:"total_duration"`
	LoadDuration       int64  `json:"load_duration"`
	PromptEvalCount    int    `json:"prompt_eval_count"`
	PromptEvalDuration int64  `json:"prompt_eval_duration"`
	EvalCount          int    `json:"eval_count"`
	EvalDuration       int64  `json:"eval_duration"`
}