discourse-ai/spec/lib/completions/json_stream_decoder_spec.rb
Sam e817b7dc11
FEATURE: improve tool support (#904)
This re-implements tool support in DiscourseAi::Completions::Llm #generate

Previously tool support was always returned via XML and it would be the responsibility of the caller to parse XML

New implementation has the endpoints return ToolCall objects.

Additionally this simplifies the Llm endpoint interface and gives it more clarity. Llms must implement

decode, decode_chunk (for streaming)

It is the implementer's responsibility to figure out how to decode chunks; the base class no longer implements this. To make this easy we ship a flexible JSON decoder which is easy to wire up.

Also (new)

    Better debugging for PMs: we now have a next / previous button to see all the Llm messages associated with a PM
    Token accounting is fixed for vllm (we were not correctly counting tokens)
2024-11-12 08:14:30 +11:00

48 lines
1.1 KiB
Ruby

# frozen_string_literal: true

# Specs for the flexible SSE/JSON stream decoder used by LLM endpoints.
# Chunks are fed in via `<<`; the decoder buffers partial data and returns
# an array of parsed payloads (or nil) once complete JSON lines are available.
describe DiscourseAi::Completions::JsonStreamDecoder do
  let(:decoder) { DiscourseAi::Completions::JsonStreamDecoder.new }

  it "should be able to parse simple messages" do
    result = decoder << "data: #{{ hello: "world" }.to_json}"
    expect(result).to eq([{ hello: "world" }])
  end

  it "should handle anthropic mixed style streams" do
    # The `|` markers split the stream into arbitrary chunks, simulating
    # network fragmentation mid-token and mid-JSON. `event:` lines and the
    # [DONE] sentinel must be ignored.
    stream = (<<~TEXT).split("|")
      event: |message_start|
      data: |{"hel|lo": "world"}|
      event: |message_start
      data: {"foo": "bar"}
      event: |message_start
      data: {"ba|z": "qux"|}
      [DONE]
    TEXT

    results = []
    stream.each { |chunk| results << (decoder << chunk) }

    expect(results.flatten.compact).to eq([{ hello: "world" }, { foo: "bar" }, { baz: "qux" }])
  end

  it "should be able to handle complex overlaps" do
    # Chunk boundaries fall inside JSON keys and values; the decoder must
    # reassemble them before parsing.
    stream = (<<~TEXT).split("|")
      data: |{"hel|lo": "world"}
      data: {"foo": "bar"}
      data: {"ba|z": "qux"|}
      [DONE]
    TEXT

    results = []
    stream.each { |chunk| results << (decoder << chunk) }

    expect(results.flatten.compact).to eq([{ hello: "world" }, { foo: "bar" }, { baz: "qux" }])
  end
end