KTextAddons

ollamaplugin.cpp
1/*
2 SPDX-FileCopyrightText: 2025 Laurent Montel <montel@kde.org>
3
4 SPDX-License-Identifier: GPL-2.0-or-later
5*/
6
7#include "ollamaplugin.h"
8#include "autogeneratetext_ollama_debug.h"
9#include "core/textautogeneratechatmodel.h"
10#include "core/textautogeneratemanager.h"
11#include "ollamamanager.h"
12#include "ollamasettings.h"
13
14OllamaPlugin::OllamaPlugin(QObject *parent)
15 : TextAutogenerateText::TextAutogenerateTextPlugin{parent}
16{
17 if (!loadSettings()) {
18 qCWarning(AUTOGENERATETEXT_OLLAMA_LOG) << "Impossible to load settings";
19 return;
20 }
21
22 connect(OllamaManager::self(), &OllamaManager::modelsLoadDone, this, [this](const OllamaManager::ModelsInfo &modelinfo) {
23 if (modelinfo.hasError) {
24 setReady(false);
25 Q_EMIT errorOccurred(modelinfo.errorOccured);
26 } else {
27 setReady(true);
28 }
29 });
30 OllamaManager::self()->loadModels();
31}
32
33OllamaPlugin::~OllamaPlugin() = default;
34
35bool OllamaPlugin::loadSettings()
36{
37 setCurrentModel(OllamaSettings::model());
38 // TODO verify that server is ok.
39 return true;
40}
41
42void OllamaPlugin::clear()
43{
44 for (const auto &connection : std::as_const(mConnections)) {
45 disconnect(connection);
46 }
47 mConnections.clear();
48 // TODO clear all thread
49}
50
// Not implemented yet: the given prompt text is currently ignored.
void OllamaPlugin::setPrompt(const QString &text)
{
    // TODO
}
55
// Returns the name of the Ollama model currently selected for requests.
QString OllamaPlugin::currentModel() const
{
    return mCurrentModel;
}
60
// Stores the model name used for subsequent completion requests.
// No change notification is emitted; the value is assigned unconditionally.
void OllamaPlugin::setCurrentModel(const QString &newCurrentModel)
{
    mCurrentModel = newCurrentModel;
}
65
// Stops listening to in-flight replies by dropping their connections.
// Note: this only disconnects; the underlying requests are not cancelled here.
void OllamaPlugin::stop()
{
    clear();
}
70
// Sends \p message to the Ollama server using the current model and streams
// the response into the last message of the shared chat model.
void OllamaPlugin::sendToLLM(const QString &message)
{
    OllamaRequest req;
    req.setMessage(message);
    req.setModel(mCurrentModel);
    /*
    for (const auto &msg : m_messages | std::views::reverse) {
        if (msg.sender == Sender::LLM) {
            req.setContext(message.context);
            break;
        }
    }
    */
    auto reply = OllamaManager::self()->getCompletion(req);

    // Streaming: on every chunk, overwrite the content of the last chat-model
    // message with the full response accumulated so far.
    // NOTE(review): both connections below are inserted under the same key
    // 'reply'; if mConnections is a plain QHash (not a QMultiHash) the second
    // insert() overwrites the first connection handle, so clear() would be
    // unable to disconnect the contentAdded connection — verify the container
    // type in the header.
    mConnections.insert(reply, connect(reply, &OllamaReply::contentAdded, this, [reply]() {
        auto message = TextAutogenerateText::TextAutogenerateManager::self()->textAutoGenerateChatModel()->lastMessage();
        message.setContent(reply->readResponse());
        TextAutogenerateText::TextAutogenerateManager::self()->textAutoGenerateChatModel()->replaceLastMessage(message);
    }));
    // Completion: stop tracking the reply, schedule its deletion on the event
    // loop, mark the message as finished and notify listeners.
    mConnections.insert(reply, connect(reply, &OllamaReply::finished, this, [reply, this] {
        auto message = TextAutogenerateText::TextAutogenerateManager::self()->textAutoGenerateChatModel()->lastMessage();
        mConnections.remove(reply);
        reply->deleteLater();
        message.setInProgress(false);
#if 0
        message.context = message.llmReply->context();
        message.info = message.llmReply->info();
#endif
        Q_EMIT finished(message); // TODO add message as argument ???
    }));
}
103
104#include "moc_ollamaplugin.cpp"
void finished()
Emitted when the LLM has finished returning its response.
void contentAdded()
Emitted when new content has been added to the response.
Q_EMIT
QMetaObject::Connection connect(const QObject *sender, PointerToMemberFunction signal, Functor functor)
bool disconnect(const QMetaObject::Connection &connection)
QFuture< ArgsType< Signal > > connect(Sender *sender, Signal signal)
This file is part of the KDE documentation.
Documentation copyright © 1996-2025 The KDE developers.
Generated on Fri Apr 25 2025 12:06:13 by doxygen 1.13.2 written by Dimitri van Heesch, © 1997-2006

KDE's Doxygen guidelines are available online.