// Changelog (2026-04-04 14:47 KST):
// - Added ContextCondenserTests: verifies no changes when proactive condensing is disabled,
//   and verifies truncation of oversized tool_result payloads.
// - Extended LlmRuntimeOverrideTests: verifies vLLM API-key decryption and the
//   SSL-bypass (insecure TLS) composition rules.
// - Synchronized documentation history in README, DEVELOPMENT, and NEXT_ROADMAP.
using System.Collections.Generic;
using System.Reflection;
using AxCopilot.Models;
using AxCopilot.Services;
using AxCopilot.Services.Agent;
using FluentAssertions;
using Xunit;

namespace AxCopilot.Tests.Services;

/// <summary>
/// Tests for runtime LLM override behavior: route/inference override stacking on
/// <c>LlmService</c>, skill-runtime override resolution in <c>AgentLoopService</c>,
/// vLLM server-info resolution (endpoint / API key / insecure-TLS flags), and the
/// masked runtime connection snapshot.
/// </summary>
public class LlmRuntimeOverrideTests
{
    [Fact]
    public void PushInferenceOverride_PopInferenceOverride_RestoresPreviousState()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "ollama";
        settings.Settings.Llm.Model = "base-model";
        settings.Settings.Llm.Temperature = 0.7;

        using var llm = new LlmService(settings);

        llm.PushRouteOverride("gemini", "gemini-2.5-pro");
        llm.PushInferenceOverride(temperature: 0.2, reasoningEffort: "high");

        // Both overrides active: route and inference parameters reflect the pushed values.
        llm.GetCurrentModelInfo().service.Should().Be("gemini");
        llm.GetCurrentModelInfo().model.Should().Be("gemini-2.5-pro");
        InvokePrivate<double>(llm, "ResolveTemperature").Should().Be(0.2);
        InvokePrivate<string?>(llm, "ResolveReasoningEffort").Should().Be("high");

        // Popping the inference override restores the base temperature/effort,
        // while the independent route override stays in effect.
        llm.PopInferenceOverride();
        llm.GetCurrentModelInfo().service.Should().Be("gemini");
        llm.GetCurrentModelInfo().model.Should().Be("gemini-2.5-pro");
        InvokePrivate<double>(llm, "ResolveTemperature").Should().Be(0.7);
        InvokePrivate<string?>(llm, "ResolveReasoningEffort").Should().BeNull();

        // Clearing the route override falls back to the configured service/model.
        llm.ClearRouteOverride();
        llm.GetCurrentModelInfo().service.Should().Be("ollama");
        llm.GetCurrentModelInfo().model.Should().Be("base-model");
    }

    [Fact]
    public void AgentLoop_ResolveSkillRuntimeOverrides_MapsModelAndEffort()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "ollama";
        settings.Settings.Llm.Model = "base-model";
        settings.Settings.Llm.RegisteredModels =
        [
            new RegisteredModel
            {
                Alias = "gpt-5.4",
                EncryptedModelName = "gpt-5.4",
                Service = "vllm",
            }
        ];

        using var llm = new LlmService(settings);
        var loop = new AgentLoopService(llm, ToolRegistry.CreateDefault(), settings);
        var messages = new List<ChatMessage>
        {
            new()
            {
                Role = "system",
                Content = """
                    [Skill Runtime Policy]
                    - preferred_model: gpt-5.4
                    - reasoning_effort: high
                    - execution_context: fork
                    - allowed_tools: Read, process
                    - hook_names: lint-pre, verify-post
                    - hook_filters: lint-pre@pre@file_edit, verify-post@post@*
                    """
            },
            new() { Role = "user", Content = "test" }
        };

        var method = typeof(AgentLoopService).GetMethod(
            "ResolveSkillRuntimeOverrides",
            BindingFlags.NonPublic | BindingFlags.Instance);
        method.Should().NotBeNull();

        var result = method!.Invoke(loop, [messages]);
        result.Should().NotBeNull();

        // The result type is internal, so its members are asserted via reflection.
        // NOTE(review): `Should().Be(true)` / `Be(0.2)` stay as object comparisons here
        // because the reflected values are boxed (BeTrue() is not available on objects).
        var resultType = result!.GetType();
        resultType.GetProperty("Service")!.GetValue(result)!.Should().Be("vllm");
        resultType.GetProperty("Model")!.GetValue(result)!.Should().Be("gpt-5.4");
        resultType.GetProperty("ReasoningEffort")!.GetValue(result)!.Should().Be("high");
        resultType.GetProperty("Temperature")!.GetValue(result)!.Should().Be(0.2);
        resultType.GetProperty("RequireForkExecution")!.GetValue(result)!.Should().Be(true);

        // "Read" in the policy is canonicalized to the internal tool name "file_read".
        var allowedSet = (IReadOnlySet<string>)resultType.GetProperty("AllowedToolNames")!.GetValue(result)!;
        allowedSet.Should().Contain("file_read");
        allowedSet.Should().Contain("process");

        var hookSet = (IReadOnlySet<string>)resultType.GetProperty("HookNames")!.GetValue(result)!;
        hookSet.Should().Contain("lint-pre");
        hookSet.Should().Contain("verify-post");

        var filters = (IReadOnlyList<object>)resultType.GetProperty("HookFilters")!.GetValue(result)!;
        filters.Should().NotBeEmpty();
    }

    [Fact]
    public void ResolveServerInfo_VllmGlobalInsecureTls_ShouldBeApplied()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "vllm";
        settings.Settings.Llm.Model = "vllm-model";
        settings.Settings.Llm.VllmEndpoint = "https://vllm.internal";
        settings.Settings.Llm.VllmApiKey = "global-key";
        settings.Settings.Llm.VllmAllowInsecureTls = true;
        settings.Settings.Llm.EncryptionEnabled = false;

        using var llm = new LlmService(settings);

        var info = ResolveServerInfo(llm);

        info.Endpoint.Should().Be("https://vllm.internal");
        info.ApiKey.Should().Be("global-key");
        info.AllowInsecureTls.Should().BeTrue();
    }

    [Fact]
    public void ResolveServerInfo_RegisteredModelOverride_ShouldUseEndpointAndApiKey()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "vllm";
        settings.Settings.Llm.Model = "corp-vllm-model";
        settings.Settings.Llm.VllmEndpoint = "https://fallback.internal";
        settings.Settings.Llm.VllmApiKey = "fallback-key";
        settings.Settings.Llm.VllmAllowInsecureTls = false;
        settings.Settings.Llm.EncryptionEnabled = false;
        settings.Settings.Llm.RegisteredModels =
        [
            new RegisteredModel
            {
                Alias = "corp",
                EncryptedModelName = "corp-vllm-model",
                Service = "vllm",
                Endpoint = "https://model.internal",
                ApiKey = "model-key",
                AllowInsecureTls = true
            }
        ];

        using var llm = new LlmService(settings);

        var info = ResolveServerInfo(llm);

        // Per-model registration takes precedence over the global vLLM settings.
        info.Endpoint.Should().Be("https://model.internal");
        info.ApiKey.Should().Be("model-key");
        info.AllowInsecureTls.Should().BeTrue();
    }

    [Fact]
    public void ResolveServerInfo_VllmEncryptedApiKey_ShouldBeDecryptedAtRuntime()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "vllm";
        settings.Settings.Llm.Model = "corp-vllm-model";
        settings.Settings.Llm.VllmEndpoint = "https://secure.internal";
        settings.Settings.Llm.EncryptionEnabled = true;
        settings.Settings.Llm.VllmApiKey = CryptoService.EncryptIfEnabled("enc-key-value", true);

        using var llm = new LlmService(settings);

        var info = ResolveServerInfo(llm);

        // The stored key is encrypted; resolution must yield the plaintext.
        info.Endpoint.Should().Be("https://secure.internal");
        info.ApiKey.Should().Be("enc-key-value");
    }

    [Fact]
    public void ResolveServerInfo_RegisteredModelInsecureFalse_GlobalInsecureTrue_ShouldRemainTrue()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "vllm";
        settings.Settings.Llm.Model = "corp-vllm-model";
        settings.Settings.Llm.VllmEndpoint = "https://fallback.internal";
        settings.Settings.Llm.VllmApiKey = "fallback-key";
        settings.Settings.Llm.VllmAllowInsecureTls = true;
        settings.Settings.Llm.EncryptionEnabled = false;
        settings.Settings.Llm.RegisteredModels =
        [
            new RegisteredModel
            {
                Alias = "corp",
                EncryptedModelName = "corp-vllm-model",
                Service = "vllm",
                Endpoint = "https://model.internal",
                ApiKey = "model-key",
                AllowInsecureTls = false
            }
        ];

        using var llm = new LlmService(settings);

        // The insecure-TLS flags are OR-composed: a global true is not downgraded
        // by a per-model false.
        ResolveServerInfo(llm).AllowInsecureTls.Should().BeTrue();
    }

    [Fact]
    public void GetRuntimeConnectionSnapshot_Vllm_ShouldExposeMaskedRuntimeInputs()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "vllm";
        settings.Settings.Llm.Model = "corp-vllm-model";
        settings.Settings.Llm.VllmEndpoint = "https://model.internal:8443";
        settings.Settings.Llm.VllmApiKey = "model-key";
        settings.Settings.Llm.VllmAllowInsecureTls = true;
        settings.Settings.Llm.EncryptionEnabled = false;

        using var llm = new LlmService(settings);
        var snapshot = llm.GetRuntimeConnectionSnapshot();

        // The snapshot never exposes the API key itself, only its presence.
        snapshot.Service.Should().Be("vllm");
        snapshot.Model.Should().Be("corp-vllm-model");
        snapshot.Endpoint.Should().Be("https://model.internal:8443");
        snapshot.AllowInsecureTls.Should().BeTrue();
        snapshot.HasApiKey.Should().BeTrue();
    }

    [Fact]
    public void GetRuntimeConnectionSnapshot_OllamaWithoutKey_ShouldReportNoKey()
    {
        var settings = new SettingsService();
        settings.Settings.Llm.Service = "ollama";
        settings.Settings.Llm.Model = "qwen2.5-coder";
        settings.Settings.Llm.OllamaEndpoint = "http://localhost:11434";
        settings.Settings.Llm.OllamaApiKey = "";
        settings.Settings.Llm.EncryptionEnabled = false;

        using var llm = new LlmService(settings);
        var snapshot = llm.GetRuntimeConnectionSnapshot();

        snapshot.Service.Should().Be("ollama");
        snapshot.Model.Should().Be("qwen2.5-coder");
        snapshot.Endpoint.Should().Be("http://localhost:11434");
        snapshot.AllowInsecureTls.Should().BeFalse();
        snapshot.HasApiKey.Should().BeFalse();
    }

    /// <summary>
    /// Invokes the private, parameterless instance method <paramref name="methodName"/>
    /// on <paramref name="instance"/> via reflection and casts the result.
    /// </summary>
    private static T InvokePrivate<T>(object instance, string methodName)
    {
        var method = instance.GetType().GetMethod(methodName, BindingFlags.NonPublic | BindingFlags.Instance);
        method.Should().NotBeNull();
        return (T)method!.Invoke(instance, null)!;
    }

    /// <summary>
    /// Invokes the private <c>LlmService.ResolveServerInfo</c> method via reflection,
    /// returning the resolved (Endpoint, ApiKey, AllowInsecureTls) tuple.
    /// Centralizes the reflection boilerplate previously duplicated in each test.
    /// </summary>
    private static (string Endpoint, string ApiKey, bool AllowInsecureTls) ResolveServerInfo(LlmService llm)
    {
        var method = typeof(LlmService).GetMethod("ResolveServerInfo", BindingFlags.NonPublic | BindingFlags.Instance);
        method.Should().NotBeNull();
        return ((string Endpoint, string ApiKey, bool AllowInsecureTls))method!.Invoke(llm, null)!;
    }
}
|