mirror of
https://github.com/tiennm99/miti99bot.git
synced 2026-04-28 04:20:38 +00:00
31ced88b78
ConceptNet (api.conceptnet.io) was returning sustained 502s, breaking every guess with an "Upstream hiccup" reply. Replace with env.AI.run on @cf/baai/bge-small-en-v1.5 and score guesses by computing cosine similarity locally against the target vector. The local google-10k wordlist doubles as the in/out-of-vocabulary set, so OOV detection is an O(1) Set.has() with no upstream call. The similarity() response shape is unchanged, so handlers/render/state stay as-is. Free on the Workers Free plan: 10k Neurons/day cap, ~0.0037 Neurons per 2-word guess → ~2.7M guesses/day headroom for this bot.
144 lines
5.3 KiB
JavaScript
144 lines
5.3 KiB
JavaScript
import { describe, expect, it, vi } from "vitest";
|
|
import {
|
|
UpstreamError,
|
|
Word2SimError,
|
|
createClient,
|
|
} from "../../../src/modules/semantle/api-client.js";
|
|
|
|
/**
 * Build a deterministic `dim`-length embedding stand-in from a numeric seed.
 * Element i is Math.sin(seed * (i + 1)), so identical seeds always produce
 * identical vectors and cosine scores stay reproducible in tests without
 * hardcoding 768 floats.
 */
function fakeVector(seed, dim = 768) {
  return Array.from({ length: dim }, (_, idx) => Math.sin(seed * (idx + 1)));
}
|
|
|
|
/**
 * Minimal Workers AI binding fake. `impl(model, input)` resolves to the
 * payload a real `env.AI.run()` call would return; wrapping it in `vi.fn`
 * lets tests inspect call counts and arguments via `.mock.calls`.
 */
function fakeAi(impl) {
  const run = vi.fn(impl);
  return { run };
}
|
|
|
|
describe("semantle/api-client", () => {
  describe("UpstreamError", () => {
    it("stores status and body metadata", () => {
      const error = new UpstreamError("test", { status: 404, body: "not found" });
      expect(error.message).toBe("test");
      expect(error.status).toBe(404);
      expect(error.body).toBe("not found");
      expect(error.name).toBe("UpstreamError");
    });

    it("stores cause when provided", () => {
      const underlying = new Error("underlying");
      const error = new UpstreamError("wrapper", { cause: underlying });
      expect(error.cause).toBe(underlying);
    });

    it("is re-exported as Word2SimError alias for legacy callers", () => {
      expect(Word2SimError).toBe(UpstreamError);
    });
  });

  describe("createClient", () => {
    it("throws without a valid AI binding", () => {
      // Anything lacking a callable `run` is rejected up front.
      for (const bad of [null, {}, { run: "not a function" }]) {
        expect(() => createClient(bad)).toThrow(TypeError);
      }
    });

    it("similarity batches target + guess in a single run() call", async () => {
      const binding = fakeAi(async (_model, { text }) => ({
        shape: [text.length, 768],
        data: text.map((_, idx) => fakeVector(idx + 1)),
      }));
      const client = createClient(binding);
      await client.similarity("apple", "orange");

      expect(binding.run).toHaveBeenCalledTimes(1);
      const [model, input] = binding.run.mock.calls[0];
      expect(model).toBe("@cf/baai/bge-small-en-v1.5");
      expect(input).toEqual({ text: ["apple", "orange"] });
    });

    it("similarity returns cosine score for in-vocab guess", async () => {
      const binding = fakeAi(async (_model, { text }) => ({
        data: text.map((_, idx) => fakeVector(idx + 1)),
      }));
      const client = createClient(binding);
      const result = await client.similarity("apple", "orange");

      expect(result.in_vocab_a).toBe(true);
      expect(result.in_vocab_b).toBe(true);
      expect(result.canonical_a).toBe("apple");
      expect(result.canonical_b).toBe("orange");
      expect(typeof result.similarity).toBe("number");
      // Cosine similarity is bounded to (-1, 1].
      expect(result.similarity).toBeGreaterThan(-1);
      expect(result.similarity).toBeLessThanOrEqual(1);
    });

    it("similarity returns 1 for identical vectors", async () => {
      const shared = fakeVector(7);
      const binding = fakeAi(async () => ({ data: [shared, shared] }));
      const client = createClient(binding);
      const result = await client.similarity("apple", "orange");
      expect(result.similarity).toBeCloseTo(1, 10);
    });

    it("similarity skips the AI call for OOV guess and flags in_vocab_b:false", async () => {
      const binding = fakeAi(async () => ({ data: [fakeVector(1), fakeVector(2)] }));
      const client = createClient(binding);
      const result = await client.similarity("apple", "zzzfoobarbaz");

      expect(result.in_vocab_b).toBe(false);
      expect(result.similarity).toBe(null);
      // OOV detection is a local Set lookup, so no Neurons are spent.
      expect(binding.run).not.toHaveBeenCalled();
    });

    it("similarity wraps AI.run rejection as UpstreamError", async () => {
      const binding = fakeAi(async () => {
        throw new Error("boom");
      });
      const client = createClient(binding);
      await expect(client.similarity("apple", "orange")).rejects.toMatchObject({
        name: "UpstreamError",
      });
    });

    it("similarity throws UpstreamError on malformed payload", async () => {
      const binding = fakeAi(async () => ({ data: [fakeVector(1)] })); // only 1 vector
      const client = createClient(binding);
      await expect(client.similarity("apple", "orange")).rejects.toMatchObject({
        name: "UpstreamError",
      });
    });

    it("similarity returns null score when a vector norm is zero", async () => {
      const zeroVec = new Array(768).fill(0);
      const binding = fakeAi(async () => ({ data: [zeroVec, fakeVector(1)] }));
      const client = createClient(binding);
      const result = await client.similarity("apple", "orange");

      // Cosine is undefined against a zero vector; null, not NaN, is expected.
      expect(result.in_vocab_b).toBe(true);
      expect(result.similarity).toBe(null);
    });

    it("randomWord returns a verified pick from the local pool", async () => {
      const binding = fakeAi(async () => ({ data: [] }));
      const client = createClient(binding);
      const result = await client.randomWord();

      expect(typeof result.word).toBe("string");
      expect(result.word.length).toBeGreaterThan(0);
      expect(result.verified).toBe(true);
      expect(binding.run).not.toHaveBeenCalled();
    });

    it("supports model override via options", async () => {
      const binding = fakeAi(async () => ({ data: [fakeVector(1), fakeVector(2)] }));
      const client = createClient(binding, { model: "@cf/baai/bge-large-en-v1.5" });
      await client.similarity("apple", "orange");
      expect(binding.run.mock.calls[0][0]).toBe("@cf/baai/bge-large-en-v1.5");
    });
  });
});
|