From 743b610768432ef49e25d53ada13c7bfd0c87a1e Mon Sep 17 00:00:00 2001
From: Agusti Fernandez <6601142+agustif@users.noreply.github.com>
Date: Mon, 27 Jan 2025 02:33:42 +0100
Subject: [PATCH 1/3] feat: add support for syntax highlighting on markdown
code blocks
Adds syntax highlighting for markdown code blocks in the LLM response conversation view via a new CodeBlockView component. (Adds the Highlightr dependency.)
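For reviewers, a rough sketch of the wiring this relies on (illustrative only; HighlightedMarkdown is a made-up name, CodeBlockView is the new view added in this patch): MarkdownUI's Theme routes fenced code blocks to a custom SwiftUI view, and Highlightr turns the block text into an attributed string inside that view.

    import SwiftUI
    import MarkdownUI
    import Highlightr

    // Sketch: render fenced code blocks with a custom view via a MarkdownUI theme.
    struct HighlightedMarkdown: View {
        let markdown: String

        var body: some View {
            Markdown(markdown)
                .markdownTheme(
                    Theme()
                        .codeBlock { block in
                            // block.content / block.language come from the fenced code block
                            CodeBlockView(code: block.content, language: block.language)
                        }
                )
        }
    }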
---
fullmoon.xcodeproj/project.pbxproj | 25 ++++++--
fullmoon/Models/Data.swift | 19 +++---
fullmoon/Views/Chat/CodeBlockView.swift | 67 ++++++++++++++++++++++
fullmoon/Views/Chat/ConversationView.swift | 35 +++++++++--
4 files changed, 127 insertions(+), 19 deletions(-)
create mode 100644 fullmoon/Views/Chat/CodeBlockView.swift
diff --git a/fullmoon.xcodeproj/project.pbxproj b/fullmoon.xcodeproj/project.pbxproj
index 1d46d74..6c01007 100644
--- a/fullmoon.xcodeproj/project.pbxproj
+++ b/fullmoon.xcodeproj/project.pbxproj
@@ -7,6 +7,7 @@
objects = {
/* Begin PBXBuildFile section */
+ 0033E7FF2D4718D9001D469E /* Highlightr in Frameworks */ = {isa = PBXBuildFile; productRef = 0033E7FE2D4718D9001D469E /* Highlightr */; };
860F26A42CBC31D6004E8D40 /* MarkdownUI in Frameworks */ = {isa = PBXBuildFile; productRef = 860F26A32CBC31D6004E8D40 /* MarkdownUI */; };
869B97622D0DD46B0078DF5A /* MLXLMCommon in Frameworks */ = {isa = PBXBuildFile; productRef = 869B97612D0DD46B0078DF5A /* MLXLMCommon */; };
869B97642D0DD4D80078DF5A /* MLXLLM in Frameworks */ = {isa = PBXBuildFile; productRef = 869B97632D0DD4D80078DF5A /* MLXLLM */; };
@@ -42,6 +43,7 @@
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
+ 0033E7FF2D4718D9001D469E /* Highlightr in Frameworks */,
869B97642D0DD4D80078DF5A /* MLXLLM in Frameworks */,
869B97622D0DD46B0078DF5A /* MLXLMCommon in Frameworks */,
860F26A42CBC31D6004E8D40 /* MarkdownUI in Frameworks */,
@@ -98,6 +100,7 @@
860F26A32CBC31D6004E8D40 /* MarkdownUI */,
869B97612D0DD46B0078DF5A /* MLXLMCommon */,
869B97632D0DD4D80078DF5A /* MLXLLM */,
+ 0033E7FE2D4718D9001D469E /* Highlightr */,
);
productName = fullmoon;
productReference = 860E9CCE2CB055B000C5BB52 /* fullmoon.app */;
@@ -130,6 +133,7 @@
packageReferences = (
860E9CE22CB0564600C5BB52 /* XCRemoteSwiftPackageReference "mlx-swift-examples" */,
860F26A22CBC31D6004E8D40 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */,
+ 0033E7FD2D4718D9001D469E /* XCRemoteSwiftPackageReference "Highlightr" */,
);
preferredProjectObjectVersion = 77;
productRefGroup = 860E9CCF2CB055B000C5BB52 /* Products */;
@@ -285,7 +289,7 @@
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 2;
DEVELOPMENT_ASSET_PATHS = "\"fullmoon/Preview Content\"";
- DEVELOPMENT_TEAM = 2VT466P8NK;
+ DEVELOPMENT_TEAM = MMRT976ZJS;
ENABLE_HARDENED_RUNTIME = YES;
ENABLE_PREVIEWS = YES;
GENERATE_INFOPLIST_FILE = YES;
@@ -307,7 +311,7 @@
"LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]" = "@executable_path/../Frameworks";
MACOSX_DEPLOYMENT_TARGET = 14.0;
MARKETING_VERSION = 1.2;
- PRODUCT_BUNDLE_IDENTIFIER = me.mainfra.fullmoon;
+ PRODUCT_BUNDLE_IDENTIFIER = me.bleu.moon;
PRODUCT_NAME = "$(TARGET_NAME)";
REGISTER_APP_GROUPS = NO;
SDKROOT = auto;
@@ -331,7 +335,7 @@
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 2;
DEVELOPMENT_ASSET_PATHS = "\"fullmoon/Preview Content\"";
- DEVELOPMENT_TEAM = 2VT466P8NK;
+ DEVELOPMENT_TEAM = MMRT976ZJS;
ENABLE_HARDENED_RUNTIME = YES;
ENABLE_PREVIEWS = YES;
GENERATE_INFOPLIST_FILE = YES;
@@ -353,7 +357,7 @@
"LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]" = "@executable_path/../Frameworks";
MACOSX_DEPLOYMENT_TARGET = 14.0;
MARKETING_VERSION = 1.2;
- PRODUCT_BUNDLE_IDENTIFIER = me.mainfra.fullmoon;
+ PRODUCT_BUNDLE_IDENTIFIER = me.bleu.moon;
PRODUCT_NAME = "$(TARGET_NAME)";
REGISTER_APP_GROUPS = NO;
SDKROOT = auto;
@@ -392,6 +396,14 @@
/* End XCConfigurationList section */
/* Begin XCRemoteSwiftPackageReference section */
+ 0033E7FD2D4718D9001D469E /* XCRemoteSwiftPackageReference "Highlightr" */ = {
+ isa = XCRemoteSwiftPackageReference;
+ repositoryURL = "https://github.com/raspu/Highlightr.git";
+ requirement = {
+ kind = upToNextMajorVersion;
+ minimumVersion = 2.2.1;
+ };
+ };
860E9CE22CB0564600C5BB52 /* XCRemoteSwiftPackageReference "mlx-swift-examples" */ = {
isa = XCRemoteSwiftPackageReference;
repositoryURL = "https://github.com/ml-explore/mlx-swift-examples/";
@@ -411,6 +423,11 @@
/* End XCRemoteSwiftPackageReference section */
/* Begin XCSwiftPackageProductDependency section */
+ 0033E7FE2D4718D9001D469E /* Highlightr */ = {
+ isa = XCSwiftPackageProductDependency;
+ package = 0033E7FD2D4718D9001D469E /* XCRemoteSwiftPackageReference "Highlightr" */;
+ productName = Highlightr;
+ };
860F26A32CBC31D6004E8D40 /* MarkdownUI */ = {
isa = XCSwiftPackageProductDependency;
package = 860F26A22CBC31D6004E8D40 /* XCRemoteSwiftPackageReference "swift-markdown-ui" */;
diff --git a/fullmoon/Models/Data.swift b/fullmoon/Models/Data.swift
index 8f73b45..f0ba102 100644
--- a/fullmoon/Models/Data.swift
+++ b/fullmoon/Models/Data.swift
@@ -153,23 +153,24 @@ class Message {
}
@Model
-final class Thread: Sendable {
- @Attribute(.unique) var id: UUID
- var title: String?
+final class Thread {
+ @Attribute(.unique) let id: UUID
var timestamp: Date
-
- @Relationship var messages: [Message] = []
-
- var sortedMessages: [Message] {
- return messages.sorted { $0.timestamp < $1.timestamp }
- }
+ var messages: [Message]
init() {
self.id = UUID()
self.timestamp = Date()
+ self.messages = []
+ }
+
+ var sortedMessages: [Message] {
+ messages.sorted { $0.timestamp < $1.timestamp }
}
}
+extension Thread: @unchecked Sendable {}
+
enum AppTintColor: String, CaseIterable {
case monochrome, blue, brown, gray, green, indigo, mint, orange, pink, purple, red, teal, yellow
diff --git a/fullmoon/Views/Chat/CodeBlockView.swift b/fullmoon/Views/Chat/CodeBlockView.swift
new file mode 100644
index 0000000..a4eb303
--- /dev/null
+++ b/fullmoon/Views/Chat/CodeBlockView.swift
@@ -0,0 +1,67 @@
+import SwiftUI
+import MarkdownUI
+import Highlightr
+
+struct CodeBlockView: View {
+ let code: String
+ let language: String?
+ @Environment(\.colorScheme) var colorScheme
+
+ private let highlightr = Highlightr()
+
+ var platformBackgroundColor: Color {
+ #if os(iOS)
+ return Color(UIColor.secondarySystemBackground)
+ #elseif os(visionOS)
+ return Color(UIColor.separator)
+ #elseif os(macOS)
+ return Color(NSColor.secondarySystemFill)
+ #endif
+ }
+
+ var highlightedCode: NSAttributedString? {
+ guard let highlightr = highlightr else { return nil }
+ highlightr.setTheme(to: colorScheme == .dark ? "atom-one-dark" : "atom-one-light")
+ highlightr.theme.codeFont = .monospacedSystemFont(ofSize: 14, weight: .regular)
+ return highlightr.highlight(code, as: language)
+ }
+
+ var body: some View {
+ ScrollView(.horizontal, showsIndicators: false) {
+ VStack(alignment: .leading, spacing: 4) {
+ if let language {
+ Text(language)
+ .font(.caption)
+ .foregroundStyle(.secondary)
+ .padding(.bottom, 2)
+ }
+
+ if let highlightedCode {
+ Text(AttributedString(highlightedCode))
+ .textSelection(.enabled)
+ } else {
+ Text(code)
+ .font(.system(.body, design: .monospaced))
+ .textSelection(.enabled)
+ }
+ }
+ }
+ .padding()
+ .background(platformBackgroundColor)
+ .cornerRadius(8)
+ }
+}
+
+#Preview {
+ VStack {
+ CodeBlockView(
+ code: "print(\"Hello, World!\")",
+ language: "swift"
+ )
+ CodeBlockView(
+ code: "function hello() {\n console.log('Hello World');\n}",
+ language: "javascript"
+ )
+ }
+ .padding()
+}
\ No newline at end of file
diff --git a/fullmoon/Views/Chat/ConversationView.swift b/fullmoon/Views/Chat/ConversationView.swift
index 7900f09..61f0123 100644
--- a/fullmoon/Views/Chat/ConversationView.swift
+++ b/fullmoon/Views/Chat/ConversationView.swift
@@ -5,8 +5,9 @@
// Created by Xavier on 16/12/2024.
//
-import MarkdownUI
import SwiftUI
+import SwiftData
+import MarkdownUI
extension TimeInterval {
var formatted: String {
@@ -82,6 +83,26 @@ struct MessageView: View {
.foregroundStyle(.secondary)
}
+ private func parseCodeBlocks(_ text: String) -> AttributedString {
+ var config = AttributedString.MarkdownParsingOptions()
+ config.interpretedSyntax = .inlineOnlyPreservingWhitespace
+ return (try? AttributedString(markdown: text, options: config)) ?? AttributedString(text)
+ }
+
+ private func markdownTheme(foregroundColor: Color) -> Theme {
+ Theme()
+ .text {
+ ForegroundColor(foregroundColor)
+ }
+ .code {
+ FontFamilyVariant(.monospaced)
+ FontSize(.em(0.85))
+ }
+ .codeBlock { content in
+ CodeBlockView(code: content.content, language: content.language)
+ }
+ }
+
var body: some View {
HStack {
if message.role == .user { Spacer() }
@@ -101,9 +122,7 @@ struct MessageView: View {
.foregroundStyle(.fill)
Markdown(thinking)
.textSelection(.enabled)
- .markdownTextStyle {
- ForegroundColor(.secondary)
- }
+ .markdownTheme(markdownTheme(foregroundColor: .secondary))
}
.padding(.leading, 5)
}
@@ -121,12 +140,16 @@ struct MessageView: View {
if let afterThink {
Markdown(afterThink)
.textSelection(.enabled)
+ .markdownTheme(markdownTheme(foregroundColor: .primary))
}
}
.padding(.trailing, 48)
} else {
- Markdown(message.content)
- .textSelection(.enabled)
+ VStack(alignment: .leading, spacing: 8) {
+ Markdown(message.content)
+ .textSelection(.enabled)
+ .markdownTheme(markdownTheme(foregroundColor: .primary))
+ }
#if os(iOS) || os(visionOS)
.padding(.horizontal, 16)
.padding(.vertical, 12)
From 20b8333c2c6a3011803c5ca9d92b385aa1ae22ba Mon Sep 17 00:00:00 2001
From: Agusti Fernandez <6601142+agustif@users.noreply.github.com>
Date: Mon, 27 Jan 2025 02:39:16 +0100
Subject: [PATCH 2/3] Add copy button to code blocks
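The copy action itself is a platform-conditional clipboard write; a minimal sketch of that piece (the actual view also flips an isCopied flag for the "Copied!" label and fires a haptic on iOS):

    import SwiftUI

    // Sketch: copy a code string to the clipboard on macOS vs iOS/visionOS.
    func copyCode(_ text: String) {
        #if os(macOS)
        NSPasteboard.general.clearContents()
        NSPasteboard.general.setString(text, forType: .string)
        #else
        UIPasteboard.general.string = text
        #endif
    }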
---
fullmoon/Views/Chat/CodeBlockView.swift | 78 ++++++++++++++++++++++---
1 file changed, 71 insertions(+), 7 deletions(-)
diff --git a/fullmoon/Views/Chat/CodeBlockView.swift b/fullmoon/Views/Chat/CodeBlockView.swift
index a4eb303..8ed1600 100644
--- a/fullmoon/Views/Chat/CodeBlockView.swift
+++ b/fullmoon/Views/Chat/CodeBlockView.swift
@@ -2,10 +2,28 @@ import SwiftUI
import MarkdownUI
import Highlightr
+private let languageMap: [String: String] = [
+ "js": "javascript",
+ "ts": "typescript",
+ "py": "python",
+ "rb": "ruby",
+ "shell": "bash",
+ "sh": "bash",
+ "jsx": "javascript",
+ "tsx": "typescript",
+ "yml": "yaml",
+ "md": "markdown",
+ "cpp": "c++",
+ "objective-c": "objectivec",
+ "objc": "objectivec",
+ "golang": "go"
+]
+
struct CodeBlockView: View {
let code: String
let language: String?
@Environment(\.colorScheme) var colorScheme
+ @State private var isCopied = false
private let highlightr = Highlightr()
@@ -19,23 +37,65 @@ struct CodeBlockView: View {
#endif
}
+ private func normalizeLanguage(_ language: String?) -> String? {
+ guard let language = language?.lowercased() else { return nil }
+ return languageMap[language] ?? language
+ }
+
var highlightedCode: NSAttributedString? {
guard let highlightr = highlightr else { return nil }
highlightr.setTheme(to: colorScheme == .dark ? "atom-one-dark" : "atom-one-light")
highlightr.theme.codeFont = .monospacedSystemFont(ofSize: 14, weight: .regular)
- return highlightr.highlight(code, as: language)
+ return highlightr.highlight(code, as: normalizeLanguage(language))
+ }
+
+ func copyToClipboard() {
+ #if os(macOS)
+ NSPasteboard.general.clearContents()
+ NSPasteboard.general.setString(code, forType: .string)
+ #else
+ UIPasteboard.general.string = code
+ let generator = UIImpactFeedbackGenerator(style: .light)
+ generator.impactOccurred()
+ #endif
+
+ withAnimation {
+ isCopied = true
+ }
+
+ DispatchQueue.main.asyncAfter(deadline: .now() + 2) {
+ withAnimation {
+ isCopied = false
+ }
+ }
}
var body: some View {
- ScrollView(.horizontal, showsIndicators: false) {
- VStack(alignment: .leading, spacing: 4) {
+ VStack(alignment: .leading, spacing: 4) {
+ HStack {
if let language {
Text(language)
.font(.caption)
.foregroundStyle(.secondary)
- .padding(.bottom, 2)
}
-
+ Spacer()
+ Button(action: copyToClipboard) {
+ HStack(spacing: 4) {
+ Image(systemName: isCopied ? "checkmark" : "doc.on.doc")
+ .font(.system(size: 12))
+ Text(isCopied ? "Copied!" : "Copy")
+ .font(.caption)
+ }
+ .padding(.vertical, 4)
+ .padding(.horizontal, 8)
+ .background(.secondary.opacity(0.1))
+ .cornerRadius(6)
+ }
+ .buttonStyle(.plain)
+ }
+ .padding(.bottom, 2)
+
+ ScrollView(.horizontal, showsIndicators: false) {
if let highlightedCode {
Text(AttributedString(highlightedCode))
.textSelection(.enabled)
@@ -53,14 +113,18 @@ struct CodeBlockView: View {
}
#Preview {
- VStack {
+ VStack(spacing: 20) {
CodeBlockView(
code: "print(\"Hello, World!\")",
language: "swift"
)
CodeBlockView(
code: "function hello() {\n console.log('Hello World');\n}",
- language: "javascript"
+ language: "js"
+ )
+ CodeBlockView(
+ code: "def hello():\n print('Hello World')",
+ language: "python"
)
}
.padding()
From e85da931202ec562d4ce378b79b606a5345dcb87 Mon Sep 17 00:00:00 2001
From: Agusti Fernandez <6601142+agustif@users.noreply.github.com>
Date: Fri, 14 Feb 2025 07:55:48 +0100
Subject: [PATCH 3/3] feat: support both local models and remote
 OpenAI-compatible servers
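For context, a condensed sketch of the streaming request the new server path makes against an OpenAI-compatible chat/completions endpoint (same shape as the code in LLMEvaluator; error handling, the reasoning/tool-call parsing, and the DALL-E branch are omitted):

    import Foundation

    // Sketch: stream chat completions from an OpenAI-compatible server and
    // accumulate the streamed delta tokens into a single string.
    func streamChat(baseURL: URL, apiKey: String, model: String,
                    messages: [[String: String]]) async throws -> String {
        var request = URLRequest(url: baseURL.appendingPathComponent("chat/completions"))
        request.httpMethod = "POST"
        request.setValue("application/json", forHTTPHeaderField: "Content-Type")
        if !apiKey.isEmpty {
            request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
        }
        request.httpBody = try JSONSerialization.data(withJSONObject: [
            "model": model,
            "messages": messages,
            "stream": true
        ])

        var text = ""
        let (bytes, _) = try await URLSession.shared.bytes(for: request)
        for try await line in bytes.lines {
            guard line.hasPrefix("data: "), line != "data: [DONE]" else { continue }
            let payload = Data(line.dropFirst(6).utf8)
            if let json = try? JSONSerialization.jsonObject(with: payload) as? [String: Any],
               let choices = json["choices"] as? [[String: Any]],
               let delta = choices.first?["delta"] as? [String: Any],
               let content = delta["content"] as? String {
                text += content  // accumulate streamed tokens
            }
        }
        return text
    }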
---
fullmoon.entitlements | 14 +
fullmoon.xcodeproj/project.pbxproj | 2 +-
fullmoon/ContentView.swift | 2 -
fullmoon/Info.plist | 5 +
fullmoon/Models/Data.swift | 188 +++++++++
fullmoon/Models/LLMEvaluator.swift | 392 +++++++++++++++++-
fullmoon/Models/RequestLLMIntent.swift | 4 +-
fullmoon/Models/ServerConfig.swift | 33 ++
fullmoon/Views/Chat/ConversationView.swift | 3 +-
...boardingDownloadingModelProgressView.swift | 2 +-
.../Views/Settings/ModelsSettingsView.swift | 332 +++++++++++++--
fullmoon/Views/Settings/SettingsView.swift | 2 +-
fullmoon/fullmoonApp.swift | 15 +-
13 files changed, 918 insertions(+), 76 deletions(-)
create mode 100644 fullmoon.entitlements
create mode 100644 fullmoon/Models/ServerConfig.swift
diff --git a/fullmoon.entitlements b/fullmoon.entitlements
new file mode 100644
index 0000000..c3243de
--- /dev/null
+++ b/fullmoon.entitlements
@@ -0,0 +1,14 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ <plist version="1.0">
+ <dict>
+ <key>com.apple.security.app-sandbox</key>
+ <true/>
+ <key>com.apple.security.network.client</key>
+ <true/>
+ <key>com.apple.security.network.server</key>
+ <true/>
+ <key>com.apple.security.network.client.local</key>
+ <true/>
+ </dict>
+ </plist>
\ No newline at end of file
diff --git a/fullmoon.xcodeproj/project.pbxproj b/fullmoon.xcodeproj/project.pbxproj
index 6c01007..61036de 100644
--- a/fullmoon.xcodeproj/project.pbxproj
+++ b/fullmoon.xcodeproj/project.pbxproj
@@ -339,9 +339,9 @@
ENABLE_HARDENED_RUNTIME = YES;
ENABLE_PREVIEWS = YES;
GENERATE_INFOPLIST_FILE = YES;
- INFOPLIST_FILE = fullmoon/Info.plist;
INFOPLIST_KEY_CFBundleDisplayName = fullmoon;
INFOPLIST_KEY_LSApplicationCategoryType = "public.app-category.productivity";
+ INFOPLIST_KEY_NSAppTransportSecurity_NSAllowsArbitraryLoads = YES;
"INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphoneos*]" = YES;
"INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphonesimulator*]" = YES;
"INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphoneos*]" = YES;
diff --git a/fullmoon/ContentView.swift b/fullmoon/ContentView.swift
index cd50a15..9233eb0 100644
--- a/fullmoon/ContentView.swift
+++ b/fullmoon/ContentView.swift
@@ -36,7 +36,6 @@ struct ContentView: View {
}
}
.environmentObject(appManager)
- .environment(llm)
.task {
if appManager.installedModels.count == 0 {
showOnboarding.toggle()
@@ -71,7 +70,6 @@ struct ContentView: View {
.sheet(isPresented: $showSettings) {
SettingsView(currentThread: $currentThread)
.environmentObject(appManager)
- .environment(llm)
.presentationDragIndicator(.hidden)
.if(appManager.userInterfaceIdiom == .phone) { view in
view.presentationDetents([.medium])
diff --git a/fullmoon/Info.plist b/fullmoon/Info.plist
index bc11256..2d45400 100644
--- a/fullmoon/Info.plist
+++ b/fullmoon/Info.plist
@@ -4,5 +4,10 @@
<key>ITSAppUsesNonExemptEncryption</key>
+ <key>NSAppTransportSecurity</key>
+ <dict>
+ <key>NSAllowsArbitraryLoads</key>
+ <true/>
+ </dict>
diff --git a/fullmoon/Models/Data.swift b/fullmoon/Models/Data.swift
index f0ba102..cb47d61 100644
--- a/fullmoon/Models/Data.swift
+++ b/fullmoon/Models/Data.swift
@@ -18,6 +18,39 @@ class AppManager: ObservableObject {
@AppStorage("shouldPlayHaptics") var shouldPlayHaptics = true
@AppStorage("numberOfVisits") var numberOfVisits = 0
@AppStorage("numberOfVisitsOfLastRequest") var numberOfVisitsOfLastRequest = 0
+ @AppStorage("isUsingServer") var isUsingServer = false
+ @AppStorage("serverAPIKey") var serverAPIKeyStorage = ""
+ @AppStorage("selectedServerId") var selectedServerIdString: String?
+
+ @Published var servers: [ServerConfig] = []
+ @Published var selectedServerId: UUID? {
+ didSet {
+ selectedServerIdString = selectedServerId?.uuidString
+ // Reset current model when switching servers
+ currentModelName = nil
+ }
+ }
+
+ private let serversKey = "savedServers"
+
+ var currentServerURL: String {
+ if let server = currentServer {
+ return server.url
+ }
+ return ""
+ }
+
+ var currentServerAPIKey: String {
+ if let server = currentServer {
+ return server.apiKey
+ }
+ return serverAPIKeyStorage
+ }
+
+ var currentServer: ServerConfig? {
+ guard let id = selectedServerId else { return nil }
+ return servers.first { $0.id == id }
+ }
var userInterfaceIdiom: LayoutType {
#if os(visionOS)
@@ -43,7 +76,34 @@ class AppManager: ObservableObject {
}
}
+ // Add a dictionary to cache models for each server
+ @AppStorage("cachedServerModels") private var cachedServerModelsData: Data?
+ @Published private(set) var cachedServerModels: [UUID: [String]] = [:] {
+ didSet {
+ // Save to UserDefaults whenever the cache updates; use string keys so the
+ // data round-trips with loadCachedModels(), which decodes [String: [String]]
+ let stringKeyed = Dictionary(uniqueKeysWithValues: cachedServerModels.map { ($0.key.uuidString, $0.value) })
+ if let encoded = try? JSONEncoder().encode(stringKeyed) {
+ cachedServerModelsData = encoded
+ }
+ }
+ }
+
init() {
+ // First load saved servers
+ loadServers()
+
+ // Then restore selected server from saved ID string
+ if let savedIdString = selectedServerIdString,
+ let savedId = UUID(uuidString: savedIdString) {
+ selectedServerId = savedId
+ }
+
+ // If we have servers but no selection, select the first one
+ if selectedServerId == nil && !servers.isEmpty {
+ selectedServerId = servers.first?.id
+ }
+
+ // Finally load cached models
+ loadCachedModels()
loadInstalledModelsFromUserDefaults()
}
@@ -124,6 +184,128 @@ class AppManager: ObservableObject {
return "moonphase.new.moon" // New Moon (fallback)
}
}
+
+ func modelSource() -> ModelSource {
+ isUsingServer ? .server : .local
+ }
+
+ private func loadServers() {
+ if let data = UserDefaults.standard.data(forKey: serversKey),
+ let decodedServers = try? JSONDecoder().decode([ServerConfig].self, from: data) {
+ servers = decodedServers
+ }
+ }
+
+ func saveServers() {
+ if let encoded = try? JSONEncoder().encode(servers) {
+ UserDefaults.standard.set(encoded, forKey: serversKey)
+ }
+ }
+
+ // Update server saving to happen immediately when servers change
+ func addServer(_ server: ServerConfig) {
+ servers.append(server)
+ saveServers()
+
+ // Auto-select the first server if none is selected
+ if selectedServerId == nil {
+ selectedServerId = server.id
+ }
+ }
+
+ func removeServer(_ server: ServerConfig) {
+ servers.removeAll { $0.id == server.id }
+ saveServers()
+
+ // Clear selection if removed server was selected
+ if selectedServerId == server.id {
+ selectedServerId = servers.first?.id
+ }
+ }
+
+ func updateServer(_ server: ServerConfig) {
+ if let index = servers.firstIndex(where: { $0.id == server.id }) {
+ servers[index] = server
+ saveServers()
+ }
+ }
+
+ func addServerWithMetadata(_ server: ServerConfig) async {
+ var updatedServer = server
+
+ // Try to fetch server metadata
+ let metadata = await fetchServerMetadata(url: server.url)
+ if let title = metadata.title {
+ updatedServer.name = title
+ }
+
+ await MainActor.run {
+ addServer(updatedServer)
+ selectedServerId = updatedServer.id
+ }
+ }
+
+ private func fetchServerMetadata(url: String) async -> (title: String?, version: String?) {
+ guard var baseURL = URL(string: url) else { return (nil, nil) }
+ // Remove /v1 or other API paths to get base URL
+ baseURL = baseURL.deletingLastPathComponent()
+
+ do {
+ let (data, _) = try await URLSession.shared.data(from: baseURL)
+ if let html = String(data: data, encoding: .utf8) {
+ // Extract title from HTML metadata
+ let title = extractTitle(from: html)
+ let version = extractVersion(from: html)
+ return (title, version)
+ }
+ } catch {
+ print("Error fetching server metadata: \(error)")
+ }
+ return (nil, nil)
+ }
+
+ private func extractTitle(from html: String) -> String? {
+ // Basic title extraction - could be made more robust
+ if let titleRange = html.range(of: "<title>.*?</title>", options: .regularExpression) {
+ let title = html[titleRange]
+ .replacingOccurrences(of: "<title>", with: "")
+ .replacingOccurrences(of: "</title>", with: "")
+ .trimmingCharacters(in: .whitespacesAndNewlines)
+ return title.isEmpty ? nil : title
+ }
+ return nil
+ }
+
+ private func extractVersion(from html: String) -> String? {
+ // Basic version extraction - could be made more robust
+ if let metaRange = html.range(of: "content=\".*?version.*?\"", options: .regularExpression) {
+ let version = html[metaRange]
+ .replacingOccurrences(of: "content=\"", with: "")
+ .replacingOccurrences(of: "\"", with: "")
+ .trimmingCharacters(in: .whitespacesAndNewlines)
+ return version.isEmpty ? nil : version
+ }
+ return nil
+ }
+
+ private func loadCachedModels() {
+ if let data = cachedServerModelsData,
+ let decoded = try? JSONDecoder().decode([String: [String]].self, from: data) {
+ // Convert string keys back to UUIDs
+ cachedServerModels = Dictionary(uniqueKeysWithValues: decoded.compactMap { key, value in
+ guard let uuid = UUID(uuidString: key) else { return nil }
+ return (uuid, value)
+ })
+ }
+ }
+
+ func updateCachedModels(serverId: UUID, models: [String]) {
+ cachedServerModels[serverId] = models
+ }
+
+ func getCachedModels(for serverId: UUID) -> [String] {
+ return cachedServerModels[serverId] ?? []
+ }
}
enum Role: String, Codable {
@@ -258,3 +440,9 @@ enum AppFontSize: String, CaseIterable {
}
}
}
+
+enum ModelSource {
+ case local
+ case server
+}
+
diff --git a/fullmoon/Models/LLMEvaluator.swift b/fullmoon/Models/LLMEvaluator.swift
index 7de938d..bfd38bf 100644
--- a/fullmoon/Models/LLMEvaluator.swift
+++ b/fullmoon/Models/LLMEvaluator.swift
@@ -10,13 +10,13 @@ import MLXLLM
import MLXLMCommon
import MLXRandom
import SwiftUI
+import Observation
enum LLMEvaluatorError: Error {
case modelNotFound(String)
}
@Observable
-@MainActor
class LLMEvaluator {
var running = false
var cancelled = false
@@ -27,41 +27,68 @@ class LLMEvaluator {
var thinkingTime: TimeInterval?
var collapsed: Bool = false
var isThinking: Bool = false
-
+ var serverModels: [String] = []
+ var selectedServerModel: String?
+ var startTime: Date?
+ var isLoadingModels = false
+
var elapsedTime: TimeInterval? {
if let startTime {
return Date().timeIntervalSince(startTime)
}
-
return nil
}
- private var startTime: Date?
-
var modelConfiguration = ModelConfiguration.defaultModel
+ var loadState = LoadState.idle
- func switchModel(_ model: ModelConfiguration) async {
- progress = 0.0 // reset progress
- loadState = .idle
- modelConfiguration = model
- _ = try? await load(modelName: model.name)
+ enum LoadState {
+ case idle
+ case loaded(ModelContainer)
}
/// parameters controlling the output
let generateParameters = GenerateParameters(temperature: 0.5)
let maxTokens = 4096
-
- /// update the display every N tokens -- 4 looks like it updates continuously
- /// and is low overhead. observed ~15% reduction in tokens/s when updating
- /// on every token
let displayEveryNTokens = 4
- enum LoadState {
- case idle
- case loaded(ModelContainer)
+ // Add a property to store AppManager
+ private weak var appManager: AppManager?
+
+ // Add new property to track reasoning steps
+ var reasoningSteps: [String] = []
+
+ init(appManager: AppManager) {
+ self.appManager = appManager
+
+ // Restore server models if we have a current server
+ if let server = appManager.currentServer {
+ Task {
+ await MainActor.run {
+ // First load cached models
+ serverModels = appManager.getCachedModels(for: server.id)
+
+ // Then fetch fresh models in background
+ Task {
+ let models = await fetchServerModels(for: server)
+ if !models.isEmpty {
+ await MainActor.run {
+ serverModels = models
+ appManager.updateCachedModels(serverId: server.id, models: models)
+ }
+ }
+ }
+ }
+ }
+ }
}
- var loadState = LoadState.idle
+ func switchModel(_ model: ModelConfiguration) async {
+ progress = 0.0 // reset progress
+ loadState = .idle
+ modelConfiguration = model
+ _ = try? await load(modelName: model.name)
+ }
/// load and return the model -- can be called multiple times, subsequent calls will
/// just return the loaded model
@@ -98,8 +125,226 @@ class LLMEvaluator {
cancelled = true
}
- func generate(modelName: String, thread: Thread, systemPrompt: String) async -> String {
- guard !running else { return "" }
+ @MainActor
+ func generate(modelName: String, thread: Thread, systemPrompt: String = "") async -> String {
+ guard let appManager = appManager else { return "No app manager configured" }
+
+ // Check if this is an image generation request
+ if modelName.hasPrefix("dall-e") {
+ do {
+ guard let lastMessage = thread.messages.last else {
+ return "No prompt provided"
+ }
+ return try await generateImage(prompt: lastMessage.content)
+ } catch {
+ return "Image generation failed: \(error.localizedDescription)"
+ }
+ }
+
+ guard !running else {
+ print("Already running, skipping new request")
+ return ""
+ }
+
+ await MainActor.run {
+ running = true
+ isThinking = true
+ startTime = Date()
+ output = ""
+ }
+
+ defer {
+ Task { @MainActor in
+ running = false
+ isThinking = false
+ startTime = nil
+ }
+ }
+
+ if appManager.isUsingServer {
+ print("Using server mode")
+ return await generateWithServer(thread: thread, systemPrompt: systemPrompt)
+ } else {
+ print("Using local mode")
+ return await generateWithLocalModel(modelName: modelName, thread: thread, systemPrompt: systemPrompt)
+ }
+ }
+
+ private func generateWithServer(thread: Thread, systemPrompt: String) async -> String {
+ guard let appManager = self.appManager,
+ let server = appManager.currentServer,
+ let modelName = appManager.currentModelName else {
+ return "Error: Server configuration not available"
+ }
+
+ let finalServerType = server.type
+ let serverURL = server.url
+
+ print("π΅ Server type: \(String(describing: finalServerType))")
+ print("π΅ Current model: \(modelName)")
+ print("π΅ Current server URL: \(serverURL)")
+
+ // Build request
+ guard let url = URL(string: serverURL)?.appendingPathComponent("chat/completions") else {
+ return "Error: Invalid server URL"
+ }
+
+ var request = URLRequest(url: url)
+ request.httpMethod = "POST"
+ request.setValue("application/json", forHTTPHeaderField: "Content-Type")
+
+ if !server.apiKey.isEmpty {
+ request.setValue("Bearer \(server.apiKey)", forHTTPHeaderField: "Authorization")
+ }
+
+ // Build request body based on server type
+ let messages = thread.sortedMessages.map { message in
+ [
+ "role": message.role.rawValue,
+ "content": message.content
+ ]
+ }
+
+ var allMessages: [[String: String]]
+
+ // Initialize the base body structure with common parameters
+ var body: [String: Any] = [
+ "stream": true,
+ "model": modelName,
+ "temperature": 1
+ ]
+
+ // Handle different server types
+ switch finalServerType {
+ case .openai:
+ // For OpenAI models starting with "o", use "user" role
+ let systemRole = modelName.hasPrefix("o") ? "user" : "system"
+ let systemMessage = ["role": systemRole, "content": systemPrompt]
+ allMessages = [systemMessage] + messages
+
+ // Update the body parameters for different model types
+ if modelName.hasPrefix("dall-e") {
+ // Configure for image generation
+ body["n"] = 1
+ body["size"] = "1024x1024"
+ body["quality"] = "standard"
+ body["response_format"] = "url"
+ // Extract the prompt from the last message
+ if let lastMessage = messages.last {
+ body["prompt"] = lastMessage["content"]
+ }
+ } else if modelName.hasPrefix("o1-") {
+ body["max_completion_tokens"] = 2000 // Use max_completion_tokens for o1 models
+ } else {
+ body["max_tokens"] = 2000 // Use max_tokens for other models
+ }
+ default:
+ // For other servers, include system message and ttl
+ let systemMessage = ["role": "system", "content": systemPrompt]
+ allMessages = [systemMessage] + messages
+ body["max_tokens"] = 2000
+ body["ttl"] = 600
+ }
+
+ // Add messages to body after they're prepared
+ body["messages"] = allMessages
+
+ do {
+ let jsonData = try JSONSerialization.data(withJSONObject: body)
+ request.httpBody = jsonData
+ print("π€ Sending request with body: \(String(data: jsonData, encoding: .utf8) ?? "")")
+
+ let (bytes, _) = try await URLSession.shared.bytes(for: request)
+ var fullResponse = ""
+ reasoningSteps.removeAll() // Clear previous reasoning steps
+
+ for try await line in bytes.lines {
+ print("π© Received line: \(line)")
+
+ guard !line.isEmpty else {
+ print("β οΈ Empty line, skipping")
+ continue
+ }
+ guard line != "data: [DONE]" else {
+ print("β
Received DONE signal")
+ break
+ }
+ guard line.hasPrefix("data: ") else {
+ print("β οΈ Line doesn't start with 'data: ', skipping")
+ continue
+ }
+
+ let jsonString = String(line.dropFirst(6))
+ print("π Parsing JSON string: \(jsonString)")
+
+ guard let jsonData = jsonString.data(using: .utf8),
+ let json = try? JSONSerialization.jsonObject(with: jsonData) as? [String: Any] else {
+ print("β Failed to parse JSON data")
+ continue
+ }
+
+ print("π Parsed JSON: \(json)")
+
+ // Handle OpenAI format with reasoning
+ if finalServerType == .openai {
+ if modelName.hasPrefix("dall-e") {
+ if let data = json["data"] as? [[String: Any]],
+ let firstImage = data.first,
+ let imageUrl = firstImage["url"] as? String {
+ // Return the image URL in the response
+ fullResponse = ")"
+ await updateOutput(fullResponse)
+ }
+ } else if let choices = json["choices"] as? [[String: Any]] {
+ if let firstChoice = choices.first,
+ let delta = firstChoice["delta"] as? [String: Any] {
+
+ // Check for tool calls (reasoning steps)
+ if let toolCalls = delta["tool_calls"] as? [[String: Any]] {
+ for toolCall in toolCalls {
+ if let function = toolCall["function"] as? [String: Any],
+ let name = function["name"] as? String,
+ let arguments = function["arguments"] as? String {
+ reasoningSteps.append("π€ \(name): \(arguments)")
+ await updateOutput(fullResponse + "\n\n" + reasoningSteps.joined(separator: "\n"))
+ }
+ }
+ }
+
+ // Handle regular content
+ if let content = delta["content"] as? String {
+ fullResponse += content
+ await updateOutput(fullResponse + "\n\n" + reasoningSteps.joined(separator: "\n"))
+ }
+ }
+ }
+ } else {
+ // Handle other servers (Ollama, LM Studio)
+ if let choices = json["choices"] as? [[String: Any]],
+ let delta = choices.first?["delta"] as? [String: Any],
+ let content = delta["content"] as? String {
+ fullResponse += content
+ await updateOutput(fullResponse)
+ }
+ }
+ }
+
+ print("π Final response: \(fullResponse)")
+ return fullResponse
+
+ } catch {
+ print("β Error generating response: \(error)")
+ await updateOutput("Error: \(error.localizedDescription)")
+ return await output
+ }
+ }
+
+ private func generateWithLocalModel(modelName: String, thread: Thread, systemPrompt: String) async -> String {
+ print("Starting local model generation with model: \(modelName)")
+ guard !running else {
+ print("Already running, returning empty")
+ return ""
+ }
running = true
cancelled = false
@@ -159,4 +404,111 @@ class LLMEvaluator {
running = false
return output
}
+
+ @MainActor
+ func fetchServerModels(for server: ServerConfig) async -> [String] {
+ guard !server.url.isEmpty,
+ let url = URL(string: server.url)?.appendingPathComponent("models") else {
+ return []
+ }
+
+ var request = URLRequest(url: url)
+ request.httpMethod = "GET"
+ request.setValue("application/json", forHTTPHeaderField: "Content-Type")
+
+ if !server.apiKey.isEmpty {
+ request.setValue("Bearer \(server.apiKey)", forHTTPHeaderField: "Authorization")
+ }
+
+ do {
+ let (data, _) = try await URLSession.shared.data(for: request)
+ if let json = try JSONSerialization.jsonObject(with: data) as? [String: Any],
+ let models = json["data"] as? [[String: Any]] {
+ return models.compactMap { $0["id"] as? String }
+ }
+ } catch {
+ print("β Error fetching models: \(error.localizedDescription)")
+ }
+ return []
+ }
+
+ @MainActor
+ private func updateOutput(_ newOutput: String) {
+ output = newOutput
+ }
+
+ @MainActor
+ private func updateProgress(_ newProgress: Double) {
+ progress = newProgress
+ }
+
+ // Add new function for image generation
+ /// Generates an image using the DALL-E API
+ /// - Parameter prompt: The text description of the image to generate
+ /// - Returns: A markdown-formatted string containing the generated image URL
+ /// - Throws: NSError if the server configuration is invalid or the API request fails
+ @MainActor
+ func generateImage(prompt: String) async throws -> String {
+ guard let appManager = appManager,
+ let serverConfig = appManager.currentServer,
+ let modelName = appManager.currentModelName else {
+ throw NSError(domain: "LLMEvaluator", code: -1, userInfo: [NSLocalizedDescriptionKey: "No server configured"])
+ }
+
+ guard let url = URL(string: serverConfig.url)?.appendingPathComponent("images/generations") else {
+ throw NSError(domain: "LLMEvaluator", code: -1, userInfo: [NSLocalizedDescriptionKey: "Invalid URL"])
+ }
+
+ // Configure based on model version
+ let size = modelName == "dall-e-2" ? "1024x1024" : "1024x1024"
+ let quality = modelName == "dall-e-2" ? "standard" : "standard"
+
+ let body: [String: Any] = [
+ "prompt": prompt,
+ "n": 1,
+ "size": size,
+ "quality": quality,
+ "response_format": "url",
+ "model": modelName // Use the selected model (dall-e-2 or dall-e-3)
+ ]
+
+ print("πΌοΈ Using model: \(modelName) for image generation")
+
+ var request = URLRequest(url: url)
+ request.httpMethod = "POST"
+ request.setValue("application/json", forHTTPHeaderField: "Content-Type")
+ request.setValue("Bearer \(serverConfig.apiKey)", forHTTPHeaderField: "Authorization")
+ request.httpBody = try JSONSerialization.data(withJSONObject: body)
+
+ print("πΌοΈ Sending image generation request to: \(url)")
+ let (data, _) = try await URLSession.shared.data(for: request)
+
+ do {
+ let json = try JSONSerialization.jsonObject(with: data) as? [String: Any]
+
+ if let error = json?["error"] as? [String: Any],
+ let message = error["message"] as? String {
+ print("π΄ OpenAI Error: \(message)")
+ throw NSError(domain: "OpenAI", code: -1, userInfo: [NSLocalizedDescriptionKey: message])
+ }
+
+ guard let dataArray = json?["data"] as? [[String: Any]],
+ let firstImage = dataArray.first,
+ let imageUrl = firstImage["url"] as? String else {
+ throw NSError(domain: "OpenAI", code: -1, userInfo: [NSLocalizedDescriptionKey: "No image URL in response"])
+ }
+
+ return ")"
+ } catch {
+ print("π΄ Image generation error: \(error)")
+ throw error
+ }
+ }
+}
+
+private extension String {
+ func stripPrefix(_ prefix: String) -> String? {
+ guard self.hasPrefix(prefix) else { return nil }
+ return String(self.dropFirst(prefix.count))
+ }
}
diff --git a/fullmoon/Models/RequestLLMIntent.swift b/fullmoon/Models/RequestLLMIntent.swift
index d46b5ae..8d40741 100644
--- a/fullmoon/Models/RequestLLMIntent.swift
+++ b/fullmoon/Models/RequestLLMIntent.swift
@@ -40,8 +40,10 @@ struct RequestLLMIntent: AppIntent {
@MainActor
func perform() async throws -> some IntentResult & ReturnsValue & ProvidesDialog {
- let llm = LLMEvaluator()
+ // Create AppManager first
let appManager = AppManager()
+ // Pass appManager to LLMEvaluator
+ let llm = LLMEvaluator(appManager: appManager)
if prompt.isEmpty {
if let output = thread.messages.last?.content {
diff --git a/fullmoon/Models/ServerConfig.swift b/fullmoon/Models/ServerConfig.swift
new file mode 100644
index 0000000..12842b6
--- /dev/null
+++ b/fullmoon/Models/ServerConfig.swift
@@ -0,0 +1,33 @@
+import Foundation
+
+struct ServerConfig: Codable, Identifiable, Equatable {
+ let id: UUID
+ var name: String
+ var url: String
+ var apiKey: String
+ var type: ServerType
+
+ init(id: UUID = UUID(), name: String = "", url: String, apiKey: String = "", type: ServerType = .custom) {
+ self.id = id
+ self.name = name
+ self.url = url
+ self.apiKey = apiKey
+ self.type = type
+ }
+
+ enum ServerType: String, Codable, CaseIterable {
+ case openai = "OpenAI"
+ case ollama = "Ollama"
+ case lmStudio = "LM Studio"
+ case custom = "Custom"
+
+ var defaultURL: String {
+ switch self {
+ case .openai: return "https://api.openai.com/v1"
+ case .ollama: return "http://localhost:11434/v1"
+ case .lmStudio: return "http://localhost:1234/v1"
+ case .custom: return "http"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/fullmoon/Views/Chat/ConversationView.swift b/fullmoon/Views/Chat/ConversationView.swift
index 61f0123..2339657 100644
--- a/fullmoon/Views/Chat/ConversationView.swift
+++ b/fullmoon/Views/Chat/ConversationView.swift
@@ -197,8 +197,8 @@ struct MessageView: View {
}
struct ConversationView: View {
- @Environment(LLMEvaluator.self) var llm
@EnvironmentObject var appManager: AppManager
+ @Environment(LLMEvaluator.self) var llm
let thread: Thread
let generatingThreadID: UUID?
@@ -261,6 +261,5 @@ struct ConversationView: View {
#Preview {
ConversationView(thread: Thread(), generatingThreadID: nil)
- .environment(LLMEvaluator())
.environmentObject(AppManager())
}
diff --git a/fullmoon/Views/Onboarding/OnboardingDownloadingModelProgressView.swift b/fullmoon/Views/Onboarding/OnboardingDownloadingModelProgressView.swift
index 720b074..6205ea9 100644
--- a/fullmoon/Views/Onboarding/OnboardingDownloadingModelProgressView.swift
+++ b/fullmoon/Views/Onboarding/OnboardingDownloadingModelProgressView.swift
@@ -107,5 +107,5 @@ struct OnboardingDownloadingModelProgressView: View {
#Preview {
OnboardingDownloadingModelProgressView(showOnboarding: .constant(true), selectedModel: .constant(ModelConfiguration.defaultModel))
.environmentObject(AppManager())
- .environment(LLMEvaluator())
+ .environment(LLMEvaluator(appManager: AppManager()))
}
diff --git a/fullmoon/Views/Settings/ModelsSettingsView.swift b/fullmoon/Views/Settings/ModelsSettingsView.swift
index 9f76888..a51434a 100644
--- a/fullmoon/Views/Settings/ModelsSettingsView.swift
+++ b/fullmoon/Views/Settings/ModelsSettingsView.swift
@@ -10,39 +10,57 @@ import MLXLMCommon
struct ModelsSettingsView: View {
@EnvironmentObject var appManager: AppManager
- @Environment(LLMEvaluator.self) var llm
- @State var showOnboardingInstallModelView = false
+ @Environment(LLMEvaluator.self) private var llm
+ @State private var showOnboardingInstallModelView = false
+ @State private var isInitialLoad = true
+ @State private var showingAddServer = false
+ @State private var serverURL = ""
+ @State private var serverAPIKey = ""
+ @State private var serverType: ServerConfig.ServerType = .openai
+ @State private var isLoadingModels = false
var body: some View {
Form {
- Section(header: Text("installed")) {
- ForEach(appManager.installedModels, id: \.self) { modelName in
- Button {
- Task {
- await switchModel(modelName)
+ serverSection
+
+ if appManager.isUsingServer {
+ serverModelsSection
+ } else {
+ localModelsSection
+ }
+
+ Section("Server Configuration") {
+ if !appManager.servers.isEmpty {
+ Picker("Selected Server", selection: $appManager.selectedServerId) {
+ Text("None").tag(Optional.none)
+ ForEach(appManager.servers) { server in
+ Text(server.url).tag(Optional(server.id))
}
- } label: {
- Label {
- Text(appManager.modelDisplayName(modelName))
- .tint(.primary)
- } icon: {
- Image(systemName: appManager.currentModelName == modelName ? "checkmark.circle.fill" : "circle")
+ }
+
+ if let selectedServer = appManager.currentServer {
+ SecureField("API Key", text: Binding(
+ get: { selectedServer.apiKey },
+ set: { newValue in
+ if let index = appManager.servers.firstIndex(where: { $0.id == selectedServer.id }) {
+ appManager.servers[index].apiKey = newValue
+ appManager.saveServers()
+ }
+ }
+ ))
+ .textFieldStyle(.roundedBorder)
+
+ Button("Remove Server") {
+ appManager.removeServer(selectedServer)
}
+ .foregroundColor(.red)
}
- #if os(macOS)
- .buttonStyle(.borderless)
- #endif
+ }
+
+ Button("Add Server") {
+ showingAddServer = true
}
}
-
- Button {
- showOnboardingInstallModelView.toggle()
- } label: {
- Label("install a model", systemImage: "arrow.down.circle.dotted")
- }
- #if os(macOS)
- .buttonStyle(.borderless)
- #endif
}
.formStyle(.grouped)
.navigationTitle("models")
@@ -50,39 +68,263 @@ struct ModelsSettingsView: View {
.navigationBarTitleDisplayMode(.inline)
#endif
.sheet(isPresented: $showOnboardingInstallModelView) {
+ modelInstallSheet
+ }
+ .sheet(isPresented: $showingAddServer) {
NavigationStack {
- OnboardingInstallModelView(showOnboarding: $showOnboardingInstallModelView)
- .environment(llm)
- .toolbar {
- #if os(iOS) || os(visionOS)
- ToolbarItem(placement: .topBarLeading) {
- Button(action: { showOnboardingInstallModelView = false }) {
- Image(systemName: "xmark")
+ Form {
+ Section {
+ TextField("Server URL", text: $serverURL)
+ SecureField("API Key", text: $serverAPIKey)
+ Picker("Server Type", selection: $serverType) {
+ ForEach(ServerConfig.ServerType.allCases, id: \.self) { type in
+ Text(type.rawValue).tag(type)
+ }
+ }
+ }
+ }
+ .navigationTitle("Add Server")
+ .toolbar {
+ ToolbarItem(placement: .cancellationAction) {
+ Button("Cancel") {
+ showingAddServer = false
+ }
+ }
+ ToolbarItem(placement: .confirmationAction) {
+ Button("Add") {
+ let server = ServerConfig(url: serverURL, apiKey: serverAPIKey, type: serverType)
+ appManager.addServer(server)
+ showingAddServer = false
+ serverURL = ""
+ serverAPIKey = ""
+ }
+ .disabled(serverURL.isEmpty)
+ }
+ }
+ }
+ .presentationDetents([.medium])
+ }
+ // Load models when server changes
+ .onChange(of: appManager.selectedServerId) { _ in
+ Task {
+ await loadModels()
+ }
+ }
+ // Load models when server mode changes
+ .onChange(of: appManager.isUsingServer) { isServer in
+ if isServer {
+ Task {
+ await loadModels()
+ }
+ }
+ }
+ // Initial load
+ .task {
+ if isInitialLoad && appManager.isUsingServer {
+ isInitialLoad = false
+ await loadModels()
+ }
+ }
+ }
+
+ private func loadModels() async {
+ guard let server = appManager.currentServer else { return }
+
+ await MainActor.run {
+ llm.isLoadingModels = true
+ // Clear current models while loading
+ llm.serverModels = []
+ }
+
+ // First show cached models
+ await MainActor.run {
+ llm.serverModels = appManager.getCachedModels(for: server.id)
+ }
+
+ // Then fetch fresh models
+ let models = await llm.fetchServerModels(for: server)
+
+ await MainActor.run {
+ llm.serverModels = models
+ appManager.updateCachedModels(serverId: server.id, models: models)
+ llm.isLoadingModels = false
+ }
+ }
+
+ // MARK: - View Components
+
+ private var serverSection: some View {
+ Section {
+ Toggle("Use Server API", isOn: $appManager.isUsingServer)
+ .toggleStyle(.switch)
+
+ if appManager.isUsingServer {
+ ForEach(appManager.servers) { server in
+ HStack {
+ Button {
+ appManager.selectedServerId = server.id
+ } label: {
+ HStack {
+ Text(server.name)
+ .foregroundStyle(appManager.selectedServerId == server.id ? .primary : .secondary)
+ Spacer()
+ if appManager.selectedServerId == server.id {
+ Image(systemName: "checkmark")
+ .foregroundStyle(.secondary)
+ }
}
}
- #elseif os(macOS)
- ToolbarItem(placement: .destructiveAction) {
- Button(action: { showOnboardingInstallModelView = false }) {
- Text("close")
+ .buttonStyle(.plain)
+
+ Spacer()
+
+ Button {
+ appManager.removeServer(server)
+ } label: {
+ Image(systemName: "trash")
+ .foregroundStyle(.secondary)
+ }
+ .buttonStyle(.plain)
+ }
+ }
+ }
+ } footer: {
+ Text("Configure to use local models or connect to a server that supports OpenAI API spec")
+ .font(.footnote)
+ .foregroundStyle(.secondary)
+ }
+ }
+
+ private var serverModelsSection: some View {
+ Section {
+ if llm.isLoadingModels {
+ HStack {
+ Spacer()
+ ProgressView()
+ .controlSize(.small)
+ Spacer()
+ }
+ } else if llm.serverModels.isEmpty {
+ Text("No models available")
+ .foregroundStyle(.secondary)
+ .font(.subheadline)
+ } else {
+ ForEach(llm.serverModels, id: \.self) { model in
+ Button {
+ appManager.currentModelName = model
+ } label: {
+ HStack {
+ Text(model)
+ .foregroundStyle(appManager.currentModelName == model ? .primary : .secondary)
+ Spacer()
+ if appManager.currentModelName == model {
+ Image(systemName: "checkmark")
+ .foregroundStyle(.secondary)
}
}
- #endif
}
+ .buttonStyle(.plain)
+ }
+ }
+ } header: {
+ HStack {
+ Text("Available Server Models")
+ .font(.footnote)
+ .foregroundStyle(.secondary)
+ Spacer()
+ Button {
+ Task {
+ await loadModels()
+ }
+ } label: {
+ Image(systemName: "arrow.clockwise")
+ .foregroundStyle(.secondary)
+ .font(.footnote)
+ }
+ .buttonStyle(.plain)
+ }
+ }
+ }
+
+ private var localModelsSection: some View {
+ Section {
+ ForEach(appManager.installedModels, id: \.self) { modelName in
+ modelButton(modelName: modelName, isServer: false)
+ }
+
+ Button {
+ showOnboardingInstallModelView.toggle()
+ } label: {
+ Label {
+ Text("Install New Model")
+ .foregroundStyle(.secondary)
+ } icon: {
+ Image(systemName: "arrow.down.circle")
+ .foregroundStyle(.secondary)
+ }
+ }
+ .buttonStyle(.plain)
+ } header: {
+ Text("Installed Models")
+ .font(.footnote)
+ .foregroundStyle(.secondary)
+ }
+ }
+
+ private func modelButton(modelName: String, isServer: Bool) -> some View {
+ Button {
+ Task {
+ if isServer {
+ appManager.isUsingServer = true
+ appManager.currentModelName = modelName
+ appManager.playHaptic()
+ } else {
+ appManager.isUsingServer = false
+ appManager.currentModelName = modelName
+ appManager.playHaptic()
+ }
+ }
+ } label: {
+ HStack {
+ Text(modelName)
+ .foregroundStyle(appManager.currentModelName == modelName ? .primary : .secondary)
+ Spacer()
+ if appManager.currentModelName == modelName {
+ Image(systemName: "checkmark")
+ .foregroundStyle(.secondary)
+ }
}
}
+ .buttonStyle(.plain)
}
- private func switchModel(_ modelName: String) async {
- if let model = ModelConfiguration.availableModels.first(where: {
- $0.name == modelName
- }) {
- appManager.currentModelName = modelName
- appManager.playHaptic()
- await llm.switchModel(model)
+ private var modelInstallSheet: some View {
+ NavigationStack {
+ OnboardingInstallModelView(showOnboarding: $showOnboardingInstallModelView)
+ .environment(llm)
+ .toolbar {
+ #if os(iOS) || os(visionOS)
+ ToolbarItem(placement: .topBarLeading) {
+ Button(action: { showOnboardingInstallModelView = false }) {
+ Image(systemName: "xmark")
+ }
+ }
+ #elseif os(macOS)
+ ToolbarItem(placement: .destructiveAction) {
+ Button(action: { showOnboardingInstallModelView = false }) {
+ Text("Close")
+ }
+ }
+ #endif
+ }
}
}
}
#Preview {
- ModelsSettingsView()
+ NavigationStack {
+ ModelsSettingsView()
+ .environmentObject(AppManager())
+ .environment(LLMEvaluator(appManager: AppManager()))
+ }
}
diff --git a/fullmoon/Views/Settings/SettingsView.swift b/fullmoon/Views/Settings/SettingsView.swift
index d9a3d19..c512f8c 100644
--- a/fullmoon/Views/Settings/SettingsView.swift
+++ b/fullmoon/Views/Settings/SettingsView.swift
@@ -109,5 +109,5 @@ extension Bundle {
#Preview {
SettingsView(currentThread: .constant(nil))
.environmentObject(AppManager())
- .environment(LLMEvaluator())
+ .environment(LLMEvaluator(appManager: AppManager()))
}
diff --git a/fullmoon/fullmoonApp.swift b/fullmoon/fullmoonApp.swift
index 86f8515..6c1866c 100644
--- a/fullmoon/fullmoonApp.swift
+++ b/fullmoon/fullmoonApp.swift
@@ -13,8 +13,17 @@ struct fullmoonApp: App {
#if os(macOS)
@NSApplicationDelegateAdaptor(AppDelegate.self) var appDelegate
#endif
- @StateObject var appManager = AppManager()
- @State var llm = LLMEvaluator()
+ @StateObject private var appManager: AppManager
+ @State private var llm: LLMEvaluator
+
+ init() {
+ // First create the AppManager
+ let manager = AppManager()
+ // Initialize the StateObject
+ _appManager = StateObject(wrappedValue: manager)
+ // Then create the LLMEvaluator with the manager instance
+ _llm = State(initialValue: LLMEvaluator(appManager: manager))
+ }
var body: some Scene {
WindowGroup {
@@ -59,7 +68,7 @@ class AppDelegate: NSObject, NSApplicationDelegate, NSWindowDelegate {
}
func applicationShouldHandleReopen(_ sender: NSApplication, hasVisibleWindows flag: Bool) -> Bool {
- // if there’s a recently closed window, bring that back
+ // if there's a recently closed window, bring that back
if let lastClosed = closedWindowsStack.popLast() {
lastClosed.makeKeyAndOrderFront(self)
} else {