diff --git a/Dockerfile b/Dockerfile
index 3e03bad..d07414a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -77,6 +77,17 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log}
+# pydub (loaded by torch) throws a warning if ffmpeg isn't installed
+RUN apt-get update \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common \
+    && add-apt-repository -y ppa:kobuk-team/intel-graphics \
+    && apt-get update \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+    ffmpeg \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log}
+
+
 # Prerequisite for ze-monitor
 RUN apt-get update \
     && DEBIAN_FRONTEND=noninteractive apt-get install -y \
diff --git a/cache/.keep b/cache/.keep
old mode 100644
new mode 100755
diff --git a/cache/grafana/.keep b/cache/grafana/.keep
old mode 100644
new mode 100755
diff --git a/cache/prometheus/.keep b/cache/prometheus/.keep
old mode 100644
new mode 100755
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index b574bbb..2896011 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -24,6 +24,7 @@
         "@types/react": "^19.0.12",
         "@types/react-dom": "^19.0.4",
         "@uiw/react-json-view": "^2.0.0-alpha.31",
+        "@uiw/react-markdown-editor": "^6.1.4",
         "jsonrepair": "^3.12.0",
         "markdown-it": "^14.1.0",
         "mermaid": "^11.6.0",
"@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.0.2", + "@lezer/css": "^1.1.7" + } + }, + "node_modules/@codemirror/lang-go": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-go/-/lang-go-6.0.1.tgz", + "integrity": "sha512-7fNvbyNylvqCphW9HD6WFnRpcDjr+KXX/FgqXy5H5ZS0eC5edDljukm/yNgYkwTsgp2busdod50AOTIy6Jikfg==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.6.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.0.0", + "@lezer/go": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-html": { + "version": "6.4.9", + "resolved": "https://registry.npmjs.org/@codemirror/lang-html/-/lang-html-6.4.9.tgz", + "integrity": "sha512-aQv37pIMSlueybId/2PVSP6NPnmurFDVmZwzc7jszd2KAF8qd4VBbvNYPXWQq90WIARjsdVkPbw29pszmHws3Q==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/lang-css": "^6.0.0", + "@codemirror/lang-javascript": "^6.0.0", + "@codemirror/language": "^6.4.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.17.0", + "@lezer/common": "^1.0.0", + "@lezer/css": "^1.1.0", + "@lezer/html": "^1.3.0" + } + }, + "node_modules/@codemirror/lang-java": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-java/-/lang-java-6.0.1.tgz", + "integrity": "sha512-OOnmhH67h97jHzCuFaIEspbmsT98fNdhVhmA3zCxW0cn7l8rChDhZtwiwJ/JOKXgfm4J+ELxQihxaI7bj7mJRg==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@lezer/java": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-javascript": { + "version": "6.2.4", + "resolved": "https://registry.npmjs.org/@codemirror/lang-javascript/-/lang-javascript-6.2.4.tgz", + "integrity": "sha512-0WVmhp1QOqZ4Rt6GlVGwKJN3KW7Xh4H2q8ZZNGZaP6lRdxXJzmjm4FqvmOojVj6khWJHIb9sp7U/72W7xQgqAA==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.6.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.17.0", + "@lezer/common": "^1.0.0", + "@lezer/javascript": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-json": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.1.tgz", + "integrity": "sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@lezer/json": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-less": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@codemirror/lang-less/-/lang-less-6.0.2.tgz", + "integrity": "sha512-EYdQTG22V+KUUk8Qq582g7FMnCZeEHsyuOJisHRft/mQ+ZSZ2w51NupvDUHiqtsOy7It5cHLPGfHQLpMh9bqpQ==", + "dependencies": { + "@codemirror/lang-css": "^6.2.0", + "@codemirror/language": "^6.0.0", + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-liquid": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/@codemirror/lang-liquid/-/lang-liquid-6.2.3.tgz", + "integrity": "sha512-yeN+nMSrf/lNii3FJxVVEGQwFG0/2eDyH6gNOj+TGCa0hlNO4bhQnoO5ISnd7JOG+7zTEcI/GOoyraisFVY7jQ==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/lang-html": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/common": "^1.0.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.3.1" + } + }, + "node_modules/@codemirror/lang-markdown": { + "version": 
"6.3.2", + "resolved": "https://registry.npmjs.org/@codemirror/lang-markdown/-/lang-markdown-6.3.2.tgz", + "integrity": "sha512-c/5MYinGbFxYl4itE9q/rgN/sMTjOr8XL5OWnC+EaRMLfCbVUmmubTJfdgpfcSS2SCaT7b+Q+xi3l6CgoE+BsA==", + "dependencies": { + "@codemirror/autocomplete": "^6.7.1", + "@codemirror/lang-html": "^6.0.0", + "@codemirror/language": "^6.3.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/common": "^1.2.1", + "@lezer/markdown": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-php": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-php/-/lang-php-6.0.1.tgz", + "integrity": "sha512-ublojMdw/PNWa7qdN5TMsjmqkNuTBD3k6ndZ4Z0S25SBAiweFGyY68AS3xNcIOlb6DDFDvKlinLQ40vSLqf8xA==", + "dependencies": { + "@codemirror/lang-html": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.0.0", + "@lezer/php": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-python": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-python/-/lang-python-6.2.1.tgz", + "integrity": "sha512-IRjC8RUBhn9mGR9ywecNhB51yePWCGgvHfY1lWN/Mrp3cKuHr0isDKia+9HnvhiWNnMpbGhWrkhuWOc09exRyw==", + "dependencies": { + "@codemirror/autocomplete": "^6.3.2", + "@codemirror/language": "^6.8.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.2.1", + "@lezer/python": "^1.1.4" + } + }, + "node_modules/@codemirror/lang-rust": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-rust/-/lang-rust-6.0.1.tgz", + "integrity": "sha512-344EMWFBzWArHWdZn/NcgkwMvZIWUR1GEBdwG8FEp++6o6vT6KL9V7vGs2ONsKxxFUPXKI0SPcWhyYyl2zPYxQ==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@lezer/rust": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-sass": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@codemirror/lang-sass/-/lang-sass-6.0.2.tgz", + "integrity": "sha512-l/bdzIABvnTo1nzdY6U+kPAC51czYQcOErfzQ9zSm9D8GmNPD0WTW8st/CJwBTPLO8jlrbyvlSEcN20dc4iL0Q==", + "dependencies": { + "@codemirror/lang-css": "^6.2.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.0.2", + "@lezer/sass": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-sql": { + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/@codemirror/lang-sql/-/lang-sql-6.8.0.tgz", + "integrity": "sha512-aGLmY4OwGqN3TdSx3h6QeA1NrvaYtF7kkoWR/+W7/JzB0gQtJ+VJxewlnE3+VImhA4WVlhmkJr109PefOOhjLg==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-vue": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@codemirror/lang-vue/-/lang-vue-0.1.3.tgz", + "integrity": "sha512-QSKdtYTDRhEHCfo5zOShzxCmqKJvgGrZwDQSdbvCRJ5pRLWBS7pD/8e/tH44aVQT6FKm0t6RVNoSUWHOI5vNug==", + "dependencies": { + "@codemirror/lang-html": "^6.0.0", + "@codemirror/lang-javascript": "^6.1.2", + "@codemirror/language": "^6.0.0", + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.3.1" + } + }, + "node_modules/@codemirror/lang-wast": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@codemirror/lang-wast/-/lang-wast-6.0.2.tgz", + "integrity": "sha512-Imi2KTpVGm7TKuUkqyJ5NRmeFWF7aMpNiwHnLQe0x9kmrxElndyH0K6H/gXtWwY6UshMRAhpENsgfpSwsgmC6Q==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@lezer/common": "^1.2.0", + 
"@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-xml": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@codemirror/lang-xml/-/lang-xml-6.1.0.tgz", + "integrity": "sha512-3z0blhicHLfwi2UgkZYRPioSgVTo9PV5GP5ducFH6FaHy0IAJRg+ixj5gTR1gnT/glAIC8xv4w2VL1LoZfs+Jg==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.4.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/common": "^1.0.0", + "@lezer/xml": "^1.0.0" + } + }, + "node_modules/@codemirror/lang-yaml": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/@codemirror/lang-yaml/-/lang-yaml-6.1.2.tgz", + "integrity": "sha512-dxrfG8w5Ce/QbT7YID7mWZFKhdhsaTNOYjOkSIMt1qmC4VQnXSDSYVHHHn8k6kJUfIhtLo8t1JJgltlxWdsITw==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.0.0", + "@lezer/yaml": "^1.0.0" + } + }, + "node_modules/@codemirror/language": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.11.0.tgz", + "integrity": "sha512-A7+f++LodNNc1wGgoRDTt78cOwWm9KVezApgjOMp1W4hM0898nsqBXwF+sbePE7ZRcjN7Sa1Z5m2oN27XkmEjQ==", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.23.0", + "@lezer/common": "^1.1.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/language-data": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@codemirror/language-data/-/language-data-6.5.1.tgz", + "integrity": "sha512-0sWxeUSNlBr6OmkqybUTImADFUP0M3P0IiSde4nc24bz/6jIYzqYSgkOSLS+CBIoW1vU8Q9KUWXscBXeoMVC9w==", + "dependencies": { + "@codemirror/lang-angular": "^0.1.0", + "@codemirror/lang-cpp": "^6.0.0", + "@codemirror/lang-css": "^6.0.0", + "@codemirror/lang-go": "^6.0.0", + "@codemirror/lang-html": "^6.0.0", + "@codemirror/lang-java": "^6.0.0", + "@codemirror/lang-javascript": "^6.0.0", + "@codemirror/lang-json": "^6.0.0", + "@codemirror/lang-less": "^6.0.0", + "@codemirror/lang-liquid": "^6.0.0", + "@codemirror/lang-markdown": "^6.0.0", + "@codemirror/lang-php": "^6.0.0", + "@codemirror/lang-python": "^6.0.0", + "@codemirror/lang-rust": "^6.0.0", + "@codemirror/lang-sass": "^6.0.0", + "@codemirror/lang-sql": "^6.0.0", + "@codemirror/lang-vue": "^0.1.1", + "@codemirror/lang-wast": "^6.0.0", + "@codemirror/lang-xml": "^6.0.0", + "@codemirror/lang-yaml": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/legacy-modes": "^6.4.0" + } + }, + "node_modules/@codemirror/legacy-modes": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@codemirror/legacy-modes/-/legacy-modes-6.5.1.tgz", + "integrity": "sha512-DJYQQ00N1/KdESpZV7jg9hafof/iBNp9h7TYo1SLMk86TWl9uDsVdho2dzd81K+v4retmK6mdC7WpuOQDytQqw==", + "dependencies": { + "@codemirror/language": "^6.0.0" + } + }, + "node_modules/@codemirror/lint": { + "version": "6.8.5", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.5.tgz", + "integrity": "sha512-s3n3KisH7dx3vsoeGMxsbRAgKe4O1vbrnKBClm99PU0fWxmxsx5rR2PfqQgIt+2MMJBHbiJ5rfIdLYfB9NNvsA==", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.35.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/search": { + "version": "6.5.11", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.11.tgz", + "integrity": 
"sha512-KmWepDE6jUdL6n8cAAqIpRmLPBZ5ZKnicE8oGU/s3QrAVID+0VhLFrzUucVKHG5035/BSykhExDL/Xm7dHthiA==", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/state": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.2.tgz", + "integrity": "sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==", + "dependencies": { + "@marijn/find-cluster-break": "^1.0.0" + } + }, + "node_modules/@codemirror/theme-one-dark": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/@codemirror/theme-one-dark/-/theme-one-dark-6.1.2.tgz", + "integrity": "sha512-F+sH0X16j/qFLMAfbciKTxVOwkdAS336b7AXTKOZhy8BR3eH/RelsnLgLFINrpST63mmN2OuwUt0W2ndUgYwUA==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/highlight": "^1.0.0" + } + }, + "node_modules/@codemirror/view": { + "version": "6.36.8", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.36.8.tgz", + "integrity": "sha512-yoRo4f+FdnD01fFt4XpfpMCcCAo9QvZOtbrXExn4SqzH32YC6LgzqxfLZw/r6Ge65xyY03mK/UfUqrVw1gFiFg==", + "dependencies": { + "@codemirror/state": "^6.5.0", + "style-mod": "^4.1.0", + "w3c-keyname": "^2.2.4" + } + }, "node_modules/@craco/craco": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/@craco/craco/-/craco-7.1.0.tgz", @@ -3111,6 +3477,166 @@ "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==" }, + "node_modules/@lezer/common": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz", + "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==" + }, + "node_modules/@lezer/cpp": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@lezer/cpp/-/cpp-1.1.3.tgz", + "integrity": "sha512-ykYvuFQKGsRi6IcE+/hCSGUhb/I4WPjd3ELhEblm2wS2cOznDFzO+ubK2c+ioysOnlZ3EduV+MVQFCPzAIoY3w==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/css": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/css/-/css-1.2.1.tgz", + "integrity": "sha512-2F5tOqzKEKbCUNraIXc0f6HKeyKlmMWJnBB0i4XW6dJgssrZO/YlZ2pY5xgyqDleqqhiNJ3dQhbrV2aClZQMvg==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.3.0" + } + }, + "node_modules/@lezer/go": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@lezer/go/-/go-1.0.1.tgz", + "integrity": "sha512-xToRsYxwsgJNHTgNdStpcvmbVuKxTapV0dM0wey1geMMRc9aggoVyKgzYp41D2/vVOx+Ii4hmE206kvxIXBVXQ==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.3.0" + } + }, + "node_modules/@lezer/highlight": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz", + "integrity": "sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/html": { + "version": "1.3.10", + "resolved": "https://registry.npmjs.org/@lezer/html/-/html-1.3.10.tgz", + "integrity": "sha512-dqpT8nISx/p9Do3AchvYGV3qYc4/rKr3IBZxlHmpIKam56P47RSHkSF5f13Vu9hebS1jM0HmtJIwLbWz1VIY6w==", 
+ "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/java": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@lezer/java/-/java-1.1.3.tgz", + "integrity": "sha512-yHquUfujwg6Yu4Fd1GNHCvidIvJwi/1Xu2DaKl/pfWIA2c1oXkVvawH3NyXhCaFx4OdlYBVX5wvz2f7Aoa/4Xw==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/javascript": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@lezer/javascript/-/javascript-1.5.1.tgz", + "integrity": "sha512-ATOImjeVJuvgm3JQ/bpo2Tmv55HSScE2MTPnKRMRIPx2cLhHGyX2VnqpHhtIV1tVzIjZDbcWQm+NCTF40ggZVw==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.1.3", + "@lezer/lr": "^1.3.0" + } + }, + "node_modules/@lezer/json": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/json/-/json-1.0.3.tgz", + "integrity": "sha512-BP9KzdF9Y35PDpv04r0VeSTKDeox5vVr3efE7eBbx3r4s3oNLfunchejZhjArmeieBH+nVOpgIiBJpEAv8ilqQ==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/lr": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz", + "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/markdown": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.4.3.tgz", + "integrity": "sha512-kfw+2uMrQ/wy/+ONfrH83OkdFNM0ye5Xq96cLlaCy7h5UT9FO54DU4oRoIc0CSBh5NWmWuiIJA7NGLMJbQ+Oxg==", + "dependencies": { + "@lezer/common": "^1.0.0", + "@lezer/highlight": "^1.0.0" + } + }, + "node_modules/@lezer/php": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@lezer/php/-/php-1.0.2.tgz", + "integrity": "sha512-GN7BnqtGRpFyeoKSEqxvGvhJQiI4zkgmYnDk/JIyc7H7Ifc1tkPnUn/R2R8meH3h/aBf5rzjvU8ZQoyiNDtDrA==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.1.0" + } + }, + "node_modules/@lezer/python": { + "version": "1.1.18", + "resolved": "https://registry.npmjs.org/@lezer/python/-/python-1.1.18.tgz", + "integrity": "sha512-31FiUrU7z9+d/ElGQLJFXl+dKOdx0jALlP3KEOsGTex8mvj+SoE1FgItcHWK/axkxCHGUSpqIHt6JAWfWu9Rhg==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/rust": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@lezer/rust/-/rust-1.0.2.tgz", + "integrity": "sha512-Lz5sIPBdF2FUXcWeCu1//ojFAZqzTQNRga0aYv6dYXqJqPfMdCAI0NzajWUd4Xijj1IKJLtjoXRPMvTKWBcqKg==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/sass": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@lezer/sass/-/sass-1.0.7.tgz", + "integrity": "sha512-8HLlOkuX/SMHOggI2DAsXUw38TuURe+3eQ5hiuk9QmYOUyC55B1dYEIMkav5A4IELVaW4e1T4P9WRiI5ka4mdw==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/xml": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@lezer/xml/-/xml-1.0.6.tgz", + "integrity": "sha512-CdDwirL0OEaStFue/66ZmFSeppuL6Dwjlk8qk153mSQwiSH/Dlri4GNymrNWnUmPl2Um7QfV1FO9KFUyX3Twww==", + "dependencies": { + "@lezer/common": "^1.2.0", + 
"@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/yaml": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/yaml/-/yaml-1.0.3.tgz", + "integrity": "sha512-GuBLekbw9jDBDhGur82nuwkxKQ+a3W5H0GfaAthDXcAu+XdpS43VlnxA9E9hllkpSP5ellRDKjLLj7Lu9Wr6xA==", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.4.0" + } + }, "node_modules/@mapbox/geojson-rewind": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/@mapbox/geojson-rewind/-/geojson-rewind-0.5.2.tgz", @@ -3216,6 +3742,11 @@ "integrity": "sha512-gRa9gwYU3ECmQYv3lslts5hxuIa90veaEcxDYuu3QGOIAEM2mOZkVHp48ANJuu1CURtRdHKUBY5Lm1tHV+sD4g==", "peer": true }, + "node_modules/@marijn/find-cluster-break": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz", + "integrity": "sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==" + }, "node_modules/@mermaid-js/parser": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-0.4.0.tgz", @@ -5608,6 +6139,94 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@uiw/codemirror-extensions-basic-setup": { + "version": "4.23.12", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.12.tgz", + "integrity": "sha512-l9vuiXOTFDBetYrRLDmz3jDxQHDsrVAZ2Y6dVfmrqi2AsulsDu+y7csW0JsvaMqo79rYkaIZg8yeqmDgMb7VyQ==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@codemirror/autocomplete": ">=6.0.0", + "@codemirror/commands": ">=6.0.0", + "@codemirror/language": ">=6.0.0", + "@codemirror/lint": ">=6.0.0", + "@codemirror/search": ">=6.0.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/view": ">=6.0.0" + } + }, + "node_modules/@uiw/codemirror-extensions-events": { + "version": "4.23.12", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-events/-/codemirror-extensions-events-4.23.12.tgz", + "integrity": "sha512-R6LDHHm5UjNtJnBT7nYbnrnOO/aTW9/T7Cl4sPbKcbqYpHp09WuqiYHEhPoRoSAbIO38pRXcrSKU2lW+gI+GAA==", + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@codemirror/view": ">=6.0.0" + } + }, + "node_modules/@uiw/codemirror-themes": { + "version": "4.23.12", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-themes/-/codemirror-themes-4.23.12.tgz", + "integrity": "sha512-8etEByfS9yttFZW0rcWhdZc7/JXJKRWlU5lHmJCI3GydZNGCzydNA+HtK9nWKpJUndVc58Q2sqSC5OIcwq8y6A==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@codemirror/language": ">=6.0.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/view": ">=6.0.0" + } + }, + "node_modules/@uiw/copy-to-clipboard": { + "version": "1.0.17", + "resolved": "https://registry.npmjs.org/@uiw/copy-to-clipboard/-/copy-to-clipboard-1.0.17.tgz", + "integrity": "sha512-O2GUHV90Iw2VrSLVLK0OmNIMdZ5fgEg4NhvtwINsX+eZ/Wf6DWD0TdsK9xwV7dNRnK/UI2mQtl0a2/kRgm1m1A==", + "funding": { + "url": 
"https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/@uiw/react-codemirror": { + "version": "4.23.12", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.12.tgz", + "integrity": "sha512-yseqWdzoAAGAW7i/NiU8YrfSLVOEBjQvSx1KpDTFVV/nn0AlAZoDVTIPEBgdXrPlVUQoCrwgpEaj3uZCklk9QA==", + "dependencies": { + "@babel/runtime": "^7.18.6", + "@codemirror/commands": "^6.1.0", + "@codemirror/state": "^6.1.1", + "@codemirror/theme-one-dark": "^6.0.0", + "@uiw/codemirror-extensions-basic-setup": "4.23.12", + "codemirror": "^6.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@babel/runtime": ">=7.11.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/theme-one-dark": ">=6.0.0", + "@codemirror/view": ">=6.0.0", + "codemirror": ">=6.0.0", + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, "node_modules/@uiw/react-json-view": { "version": "2.0.0-alpha.31", "resolved": "https://registry.npmjs.org/@uiw/react-json-view/-/react-json-view-2.0.0-alpha.31.tgz", @@ -5621,6 +6240,77 @@ "react-dom": ">=18.0.0" } }, + "node_modules/@uiw/react-markdown-editor": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/@uiw/react-markdown-editor/-/react-markdown-editor-6.1.4.tgz", + "integrity": "sha512-Jt3tlppBNAnq8LI2xc9w1LJS4qUBh3M6gysdanQZkelCBC4dtlcRqI7KkKagWQrULG0Us88Go0yJZn0QUWzmdw==", + "dependencies": { + "@babel/runtime": "^7.22.6", + "@codemirror/lang-markdown": "^6.0.0", + "@codemirror/language-data": "^6.1.0", + "@uiw/codemirror-extensions-events": "^4.12.3", + "@uiw/codemirror-themes": "^4.12.3", + "@uiw/react-codemirror": "^4.12.3", + "@uiw/react-markdown-preview": "^5.0.0" + }, + "peerDependencies": { + "@babel/runtime": ">=7.10.0", + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@uiw/react-markdown-preview": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/@uiw/react-markdown-preview/-/react-markdown-preview-5.1.4.tgz", + "integrity": "sha512-6k13WVNHCEaamz3vh54OQ1tseIXneKlir1+E/VFQBPq8PRod+gwLfYtiitDBWu+ZFttoiKPLZ7flgHrVM+JNOg==", + "dependencies": { + "@babel/runtime": "^7.17.2", + "@uiw/copy-to-clipboard": "~1.0.12", + "react-markdown": "~9.0.1", + "rehype-attr": "~3.0.1", + "rehype-autolink-headings": "~7.1.0", + "rehype-ignore": "^2.0.0", + "rehype-prism-plus": "2.0.0", + "rehype-raw": "^7.0.0", + "rehype-rewrite": "~4.0.0", + "rehype-slug": "~6.0.0", + "remark-gfm": "~4.0.0", + "remark-github-blockquote-alert": "^1.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@uiw/react-markdown-preview/node_modules/react-markdown": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-9.0.3.tgz", + "integrity": "sha512-Yk7Z94dbgYTOrdk41Z74GoKA7rThnsbbqBTRYuxoe08qvfQ9tJVhmAKw6BJS/ZORG7kTy/s1QvYzSuaoBA1qfw==", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, "node_modules/@ungap/structured-clone": { "version": 
"1.3.0", "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", @@ -6617,6 +7307,15 @@ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" }, + "node_modules/bcp-47-match": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/bcp-47-match/-/bcp-47-match-2.0.3.tgz", + "integrity": "sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/bfj": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/bfj/-/bfj-7.1.0.tgz", @@ -7297,6 +7996,20 @@ "node": ">=4" } }, + "node_modules/codemirror": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz", + "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + } + }, "node_modules/collect-v8-coverage": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", @@ -7694,6 +8407,11 @@ "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", "devOptional": true }, + "node_modules/crelt": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", + "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==" + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -7920,6 +8638,21 @@ "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==" }, + "node_modules/css-selector-parser": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/css-selector-parser/-/css-selector-parser-3.1.2.tgz", + "integrity": "sha512-WfUcL99xWDs7b3eZPoRszWVfbNo8ErCF15PTvVROjkShGlAfjIkG6hlfj/sl6/rfo5Q9x9ryJ3VqVnAZDA+gcw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, "node_modules/css-system-font-keywords": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/css-system-font-keywords/-/css-system-font-keywords-1.0.0.tgz", @@ -9053,6 +9786,18 @@ "node": ">=8" } }, + "node_modules/direction": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/direction/-/direction-2.0.1.tgz", + "integrity": "sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA==", + "bin": { + "direction": "cli.js" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/dlv": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", @@ -11282,6 +12027,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/github-slugger": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz", + "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==" + }, "node_modules/gl-mat4": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gl-mat4/-/gl-mat4-1.2.0.tgz", @@ -11917,6 +12667,30 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/hast-util-has-property": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-has-property/-/hast-util-has-property-3.0.0.tgz", + "integrity": "sha512-MNilsvEKLFpV604hwfhVStK0usFY/QmM5zX16bo7EjnAEGofr5YyI37kzopBlZJkHD4t887i+q/C8/tr5Q94cA==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-heading-rank": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-heading-rank/-/hast-util-heading-rank-3.0.0.tgz", + "integrity": "sha512-EJKb8oMUXVHcWZTDepnr+WNbfnXKFNf9duMesmr4S8SXTJBJ9M4Yok08pu9vxdJwdlGRhVumk9mEhkEvKGifwA==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/hast-util-is-element": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz", @@ -11941,6 +12715,78 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/hast-util-raw": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", + "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw/node_modules/entities": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.0.tgz", + "integrity": "sha512-aKstq2TDOndCn4diEyp9Uq/Flu2i1GlLkc6XIDQSDMuaFE3OPW5OphLCyQ5SpSJZTb4reN+kTcYru5yIfXoRPw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/hast-util-raw/node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/hast-util-select": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/hast-util-select/-/hast-util-select-6.0.4.tgz", + "integrity": "sha512-RqGS1ZgI0MwxLaKLDxjprynNzINEkRHY2i8ln4DDjgv9ZhcYVIHN9rlpiYsqtFwrgpYU361SyWDQcGNIBVu3lw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "bcp-47-match": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "css-selector-parser": "^3.0.0", + "devlop": "^1.0.0", + "direction": "^2.0.0", + "hast-util-has-property": "^3.0.0", + 
"hast-util-to-string": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "nth-check": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/hast-util-to-jsx-runtime": { "version": "2.3.6", "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", @@ -11967,6 +12813,45 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5/node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hast-util-to-string": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-3.0.1.tgz", + "integrity": "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/hast-util-to-text": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz", @@ -12142,6 +13027,15 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/html-webpack-plugin": { "version": "5.6.3", "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.3.tgz", @@ -16578,6 +17472,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + }, "node_modules/parse-rect": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/parse-rect/-/parse-rect-1.2.0.tgz", @@ -18993,6 +19892,71 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/refractor": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/refractor/-/refractor-4.9.0.tgz", + "integrity": "sha512-nEG1SPXFoGGx+dcjftjv8cAjEusIh6ED1xhf5DG3C0x/k+rmZ2duKnc3QLpt6qeHv5fPb8uwN3VWN2BT7fr3Og==", + "dependencies": { + 
"@types/hast": "^2.0.0", + "@types/prismjs": "^1.0.0", + "hastscript": "^7.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/@types/hast": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/refractor/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/refractor/node_modules/hast-util-parse-selector": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-3.1.1.tgz", + "integrity": "sha512-jdlwBjEexy1oGz0aJ2f4GKMaVKkA9jwjr4MjAAI22E5fM/TXVZHuS5OpONtdeIkRKqAaryQ2E9xNQxijoThSZA==", + "dependencies": { + "@types/hast": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/refractor/node_modules/hastscript": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-7.2.0.tgz", + "integrity": "sha512-TtYPq24IldU8iKoJQqvZOuhi5CyCQRAbvDOX0x1eW6rsHSxa/1i2CCiptNTotGHJ3VoHRGmqiv6/D3q113ikkw==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^3.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/refractor/node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/regenerate": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", @@ -19188,6 +20152,54 @@ "regl-scatter2d": "^3.2.3" } }, + "node_modules/rehype-attr": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/rehype-attr/-/rehype-attr-3.0.3.tgz", + "integrity": "sha512-Up50Xfra8tyxnkJdCzLBIBtxOcB2M1xdeKe1324U06RAvSjYm7ULSeoM+b/nYPQPVd7jsXJ9+39IG1WAJPXONw==", + "dependencies": { + "unified": "~11.0.0", + "unist-util-visit": "~5.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/rehype-autolink-headings": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/rehype-autolink-headings/-/rehype-autolink-headings-7.1.0.tgz", + "integrity": "sha512-rItO/pSdvnvsP4QRB1pmPiNHUskikqtPojZKJPPPAVx9Hj8i8TwMBhofrrAYRhYOOBZH9tgmG5lPqDLuIWPWmw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-heading-rank": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-ignore": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/rehype-ignore/-/rehype-ignore-2.0.2.tgz", + "integrity": "sha512-BpAT/3lU9DMJ2siYVD/dSR0A/zQgD6Fb+fxkJd4j+wDVy6TYbYpK+FZqu8eM9EuNKGvi4BJR7XTZ/+zF02Dq8w==", + "dependencies": { + "hast-util-select": "^6.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, "node_modules/rehype-katex": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz", @@ -19206,6 +20218,79 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/rehype-parse": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-9.0.1.tgz", + "integrity": "sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-from-html": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-prism-plus": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/rehype-prism-plus/-/rehype-prism-plus-2.0.0.tgz", + "integrity": "sha512-FeM/9V2N7EvDZVdR2dqhAzlw5YI49m9Tgn7ZrYJeYHIahM6gcXpH0K1y2gNnKanZCydOMluJvX2cB9z3lhY8XQ==", + "dependencies": { + "hast-util-to-string": "^3.0.0", + "parse-numeric-range": "^1.3.0", + "refractor": "^4.8.0", + "rehype-parse": "^9.0.0", + "unist-util-filter": "^5.0.0", + "unist-util-visit": "^5.0.0" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-rewrite": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/rehype-rewrite/-/rehype-rewrite-4.0.2.tgz", + "integrity": "sha512-rjLJ3z6fIV11phwCqHp/KRo8xuUCO8o9bFJCNw5o6O2wlLk6g8r323aRswdGBQwfXPFYeSuZdAjp4tzo6RGqEg==", + "dependencies": { + "hast-util-select": "^6.0.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0" + }, + "engines": { + "node": ">=16.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, + "node_modules/rehype-slug": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/rehype-slug/-/rehype-slug-6.0.0.tgz", + "integrity": "sha512-lWyvf/jwu+oS5+hL5eClVd3hNdmwM1kAC0BUvEGD19pajQMIzcNUd/k9GsfQ+FfECvX+JE+e9/btsKH0EjJT6A==", + "dependencies": { + "@types/hast": "^3.0.0", + "github-slugger": "^2.0.0", + "hast-util-heading-rank": "^3.0.0", + "hast-util-to-string": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/relateurl": { "version": "0.2.7", "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", @@ -19231,6 +20316,20 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/remark-github-blockquote-alert": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/remark-github-blockquote-alert/-/remark-github-blockquote-alert-1.3.1.tgz", + "integrity": "sha512-OPNnimcKeozWN1w8KVQEuHOxgN3L4rah8geMOLhA5vN9wITqU4FWD+G26tkEsCGHiOVDbISx+Se5rGZ+D1p0Jg==", + "dependencies": { + 
"unist-util-visit": "^5.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + } + }, "node_modules/remark-math": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-6.0.0.tgz", @@ -20746,6 +21845,11 @@ "webpack": "^5.0.0" } }, + "node_modules/style-mod": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", + "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==" + }, "node_modules/style-to-js": { "version": "1.1.16", "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.16.tgz", @@ -21877,6 +22981,16 @@ "node": ">=8" } }, + "node_modules/unist-util-filter": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/unist-util-filter/-/unist-util-filter-5.0.1.tgz", + "integrity": "sha512-pHx7D4Zt6+TsfwylH9+lYhBhzyhEnCXs/lbq/Hstxno5z4gVdyc2WEW0asfjGKPyG4pEKrnBv5hdkO6+aRnQJw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + } + }, "node_modules/unist-util-find-after": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", @@ -22222,6 +23336,11 @@ "browser-process-hrtime": "^1.0.0" } }, + "node_modules/w3c-keyname": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==" + }, "node_modules/w3c-xmlserializer": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-2.0.0.tgz", diff --git a/frontend/package.json b/frontend/package.json index 8d41403..7f98f71 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -19,6 +19,7 @@ "@types/react": "^19.0.12", "@types/react-dom": "^19.0.4", "@uiw/react-json-view": "^2.0.0-alpha.31", + "@uiw/react-markdown-editor": "^6.1.4", "jsonrepair": "^3.12.0", "markdown-it": "^14.1.0", "mermaid": "^11.6.0", diff --git a/frontend/src/ControlsPage.tsx b/frontend/src/ControlsPage.tsx index 3adf084..4e6b7ed 100644 --- a/frontend/src/ControlsPage.tsx +++ b/frontend/src/ControlsPage.tsx @@ -113,6 +113,7 @@ const ControlsPage = (props: BackstoryPageProps) => { const tunables = await response.json(); serverTunables.system_prompt = tunables.system_prompt; + console.log(tunables); setSystemPrompt(tunables.system_prompt) setSnack("System prompt updated", "success"); } catch (error) { @@ -167,31 +168,36 @@ const ControlsPage = (props: BackstoryPageProps) => { body: JSON.stringify({ "reset": types }), }); - if (response.ok) { - const data = await response.json(); - if (data.error) { - throw Error() - } - for (const [key, value] of Object.entries(data)) { - switch (key) { - case "rags": - setRags(value as Tool[]); - break; - case "tools": - setTools(value as Tool[]); - break; - case "system_prompt": - setSystemPrompt((value as ServerTunables)["system_prompt"].trim()); - break; - case "history": - console.log('TODO: handle history reset'); - break; - } - } - setSnack(message, "success"); - } else { - throw Error(`${{ status: response.status, message: response.statusText }}`); + if (!response.ok) { + throw new Error(`Server responded with ${response.status}: ${response.statusText}`); } + + if (!response.body) { + throw new Error('Response body is null'); + } + + const data = await response.json(); + if (data.error) 
+        throw Error(data.error);
+      }
+
+      for (const [key, value] of Object.entries(data)) {
+        switch (key) {
+          case "rags":
+            setRags(value as Tool[]);
+            break;
+          case "tools":
+            setTools(value as Tool[]);
+            break;
+          case "system_prompt":
+            setSystemPrompt((value as ServerTunables)["system_prompt"].trim());
+            break;
+          case "history":
+            console.log('TODO: handle history reset');
+            break;
+        }
+      }
+      setSnack(message, "success");
     } catch (error) {
       console.error('Fetch error:', error);
       setSnack("Unable to restore defaults", "error");
@@ -203,20 +209,37 @@
     if (systemInfo !== undefined || sessionId === undefined) {
       return;
     }
-    fetch(connectionBase + `/api/system-info/${sessionId}`, {
-      method: 'GET',
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    })
-      .then(response => response.json())
-      .then(data => {
+    const fetchSystemInfo = async () => {
+      try {
+        const response = await fetch(connectionBase + `/api/system-info/${sessionId}`, {
+          method: 'GET',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+        })
+
+        if (!response.ok) {
+          throw new Error(`Server responded with ${response.status}: ${response.statusText}`);
+        }
+
+        if (!response.body) {
+          throw new Error('Response body is null');
+        }
+
+        const data = await response.json();
+        if (data.error) {
+          throw Error(data.error);
+        }
+
         setSystemInfo(data);
-      })
-      .catch(error => {
+      } catch (error) {
         console.error('Error obtaining system information:', error);
         setSnack("Unable to obtain system information.", "error");
-      });
+      }
+    }
+
+    fetchSystemInfo();
   }, [systemInfo, setSystemInfo, setSnack, sessionId])
 
   useEffect(() => {
@@ -273,25 +296,30 @@
       return;
     }
     const fetchTunables = async () => {
-      // Make the fetch request with proper headers
-      const response = await fetch(connectionBase + `/api/tunables/${sessionId}`, {
-        method: 'GET',
-        headers: {
-          'Content-Type': 'application/json',
-          'Accept': 'application/json',
-        },
-      });
-      const data = await response.json();
-      // console.log("Server tunables: ", data);
-      setServerTunables(data);
-      setSystemPrompt(data["system_prompt"]);
-      setMessageHistoryLength(data["message_history_length"]);
-      setTools(data["tools"]);
-      setRags(data["rags"]);
+      try {
+        // Make the fetch request with proper headers
+        const response = await fetch(connectionBase + `/api/tunables/${sessionId}`, {
+          method: 'GET',
+          headers: {
+            'Content-Type': 'application/json',
+            'Accept': 'application/json',
+          },
+        });
+        const data = await response.json();
+        // console.log("Server tunables: ", data);
+        setServerTunables(data);
+        setSystemPrompt(data["system_prompt"]);
+        setMessageHistoryLength(data["message_history_length"]);
+        setTools(data["tools"]);
+        setRags(data["rags"]);
+      } catch (error) {
+        console.error('Fetch error:', error);
+        setSnack("Unable to fetch tunables", "error");
+      }
     }
     fetchTunables();
-  }, [sessionId, setServerTunables, setSystemPrompt, setMessageHistoryLength, serverTunables, setTools, setRags]);
+  }, [sessionId, setServerTunables, setSystemPrompt, setMessageHistoryLength, serverTunables, setTools, setRags, setSnack]);
 
   const toggle = async (type: string, index: number) => {
     switch (type) {
diff --git a/frontend/src/Conversation.tsx b/frontend/src/Conversation.tsx
index c1b6a61..8c7a4c0 100644
--- a/frontend/src/Conversation.tsx
+++ b/frontend/src/Conversation.tsx
@@ -285,7 +285,9 @@ const Conversation = forwardRef((props: C
         throw new Error('Response body is null');
       }
 
-      setConversation([])
+      setProcessingMessage(undefined);
+      setStreamingMessage(undefined);
+      setConversation([]);
       setNoInteractions(true);
 
     } catch (e) {
@@ -341,13 +343,23 @@ const Conversation = forwardRef((props: C
       // Add a small delay to ensure React has time to update the UI
       await new Promise(resolve => setTimeout(resolve, 0));
 
+      let data: any = query;
+      if (type === "job_description") {
+        data = {
+          prompt: "",
+          agent_options: {
+            job_description: query.prompt,
+          }
+        }
+      }
+
       const response = await fetch(connectionBase + `/api/${type}/${sessionId}`, {
         method: 'POST',
         headers: {
           'Content-Type': 'application/json',
           'Accept': 'application/json',
         },
-        body: JSON.stringify(query)
+        body: JSON.stringify(data)
       });
 
       setSnack(`Query sent.`, "info");
diff --git a/frontend/src/Message.tsx b/frontend/src/Message.tsx
index ab21737..a520bc6 100644
--- a/frontend/src/Message.tsx
+++ b/frontend/src/Message.tsx
@@ -77,7 +77,7 @@ interface MessageMetaData {
     vector_embedding: number[];
   },
   origin: string,
-  rag: any,
+  rag: any[],
   tools?: {
     tool_calls: any[],
   },
@@ -117,8 +117,6 @@ const MessageMeta = (props: MessageMetaProps) => {
   } = props.metadata || {};
   const message: any = props.messageProps.message;
 
-  rag.forEach((r: any) => r.query = message.prompt);
-
   let llm_submission: string = "<|system|>\n"
   llm_submission += message.system_prompt + "\n\n"
   llm_submission += message.context_prompt
@@ -176,7 +174,10 @@
               {tool.name}
-
+
     {
       if (typeof (children) === "string" && children.match("\n")) {
diff --git a/frontend/src/ResumeBuilderPage.tsx b/frontend/src/ResumeBuilderPage.tsx
index 3591d61..06553ac 100644
--- a/frontend/src/ResumeBuilderPage.tsx
+++ b/frontend/src/ResumeBuilderPage.tsx
@@ -151,7 +151,6 @@ const ResumeBuilderPage: React.FC = (props: BackstoryPagePro
   }, []);
 
   const jobResponse = useCallback(async (message: BackstoryMessage) => {
-    console.log('onJobResponse', message);
     if (message.actions && message.actions.includes("job_description")) {
       await jobConversationRef.current.fetchHistory();
     }
diff --git a/frontend/src/StyledMarkdown.tsx b/frontend/src/StyledMarkdown.tsx
index d5a800d..3b8e853 100644
--- a/frontend/src/StyledMarkdown.tsx
+++ b/frontend/src/StyledMarkdown.tsx
@@ -53,7 +53,7 @@ const StyledMarkdown: React.FC = (props: StyledMarkdownProp
       }}
       displayDataTypes={false}
       objectSortKeys={false}
-      collapsed={true}
+      collapsed={1}
       shortenTextAfterLength={100}
       value={fixed}>
 }
-
+
   {node === null && Click a point in the scatter-graph to see information about that node.}
-  {!inline && node !== null && node.full_content &&
+  {node !== null && node.full_content &&
     {
       index += 1 + node.chunk_begin;
       const bgColor = (index > node.line_begin && index <= node.line_end) ? '#f0f0f0' : 'auto';
-      return
+      return
        {index}
        {line || " "}
;
diff --git a/frontend/src/useAutoScrollToBottom.tsx b/frontend/src/useAutoScrollToBottom.tsx
index 79fc434..1edfa87 100644
--- a/frontend/src/useAutoScrollToBottom.tsx
+++ b/frontend/src/useAutoScrollToBottom.tsx
@@ -118,6 +118,10 @@ const useAutoScrollToBottom = (
     let shouldScroll = false;
     const scrollTo = scrollToRef.current;
 
+    if (isPasteEvent && !scrollTo) {
+      console.error("Paste Event triggered without scrollTo");
+    }
+
     if (scrollTo) {
       // Get positions
       const containerRect = container.getBoundingClientRect();
@@ -130,7 +134,7 @@
         scrollToRect.top < containerBottom &&
         scrollToRect.bottom > containerTop;
 
       // Scroll on paste or if TextField is visible and user isn't scrolling up
-      shouldScroll = (isPasteEvent || isTextFieldVisible) && !isUserScrollingUpRef.current;
+      shouldScroll = isPasteEvent || (isTextFieldVisible && !isUserScrollingUpRef.current);
 
       if (shouldScroll) {
         requestAnimationFrame(() => {
           debug && console.debug('Scrolling to container bottom:', {
@@ -198,10 +202,12 @@
     }
 
     const handlePaste = () => {
+      console.log("handlePaste");
       // Delay scroll check to ensure DOM updates
       setTimeout(() => {
+        console.log("scrolling for handlePaste");
         requestAnimationFrame(() => checkAndScrollToBottom(true));
-      }, 0);
+      }, 100);
     };
 
     window.addEventListener('mousemove', pauseScroll);
diff --git a/src/pyproject.toml b/src/pyproject.toml
index a0ee6fa..40a4092 100644
--- a/src/pyproject.toml
+++ b/src/pyproject.toml
@@ -1,5 +1,5 @@
 [tool.black]
-line-length = 88
+line-length = 120
 target-version = ['py312']
 include = '\.pyi?$'
 exclude = '''
@@ -19,4 +19,4 @@ ignore_decorators = [
     "@model_validator", "@override", "@classmethod"
 ]
-exclude = ["tests/", "__pycache__/"]
\ No newline at end of file
+exclude = ["tests/", "__pycache__/"]
diff --git a/src/server.py b/src/server.py
index d68f630..4c3ac92 100644
--- a/src/server.py
+++ b/src/server.py
@@ -1,7 +1,9 @@
 LLM_TIMEOUT = 600
 
 from utils import logger
-from pydantic import BaseModel, Field  # type: ignore
+from pydantic import BaseModel, Field, ValidationError  # type: ignore
+from pydantic_core import PydanticSerializationError  # type: ignore
+from typing import List
 
 from typing import AsyncGenerator, Dict, Optional
 
@@ -61,6 +63,7 @@ from prometheus_client import CollectorRegistry, Counter  # type: ignore
 
 from utils import (
     rag as Rag,
+    ChromaDBGetResponse,
     tools as Tools,
     Context,
     Conversation,
@@ -69,15 +72,17 @@ from utils import (
     Metrics,
     Tunables,
     defines,
+    check_serializable,
     logger,
 )
 
-rags = [
-    {
-        "name": "JPK",
-        "enabled": True,
-        "description": "Expert data about James Ketrenos, including work history, personal hobbies, and projects.",
-    },
+
+rags: List[ChromaDBGetResponse] = [
+    ChromaDBGetResponse(
+        name="JPK",
+        enabled=True,
+        description="Expert data about James Ketrenos, including work history, personal hobbies, and projects.",
+    ),
     # { "name": "LKML", "enabled": False, "description": "Full associative data for entire LKML mailing list archive." },
}, ] @@ -461,10 +466,8 @@ class WebServer: context = self.upsert_context(context_id) agent = context.get_agent(agent_type) if not agent: - return JSONResponse( - {"error": f"{agent_type} is not recognized", "context": context.id}, - status_code=404, - ) + response = { "history": [] } + return JSONResponse(response) data = await request.json() try: @@ -475,8 +478,8 @@ class WebServer: logger.info(f"Resetting {reset_operation}") case "rags": logger.info(f"Resetting {reset_operation}") - context.rags = rags.copy() - response["rags"] = context.rags + context.rags = [ r.model_copy() for r in rags] + response["rags"] = [ r.model_dump(mode="json") for r in context.rags ] case "tools": logger.info(f"Resetting {reset_operation}") context.tools = Tools.enabled_tools(Tools.tools) @@ -537,6 +540,7 @@ class WebServer: data = await request.json() agent = context.get_agent("chat") if not agent: + logger.info("chat agent does not exist on this context!") return JSONResponse( {"error": f"chat is not recognized", "context": context.id}, status_code=404, @@ -572,20 +576,20 @@ class WebServer: case "rags": # { "rags": [{ "tool": tool?.name, "enabled": tool.enabled }] } - rags: list[dict[str, Any]] = data[k] - if not rags: + rag_configs: list[dict[str, Any]] = data[k] + if not rag_configs: return JSONResponse( { "status": "error", "message": "RAGs can not be empty.", } ) - for rag in rags: + for config in rag_configs: for context_rag in context.rags: - if context_rag["name"] == rag["name"]: - context_rag["enabled"] = rag["enabled"] + if context_rag.name == config["name"]: + context_rag.enabled = config["enabled"] self.save_context(context_id) - return JSONResponse({"rags": context.rags}) + return JSONResponse({"rags": [ r.model_dump(mode="json") for r in context.rags]}) case "system_prompt": system_prompt = data[k].strip() @@ -615,12 +619,10 @@ class WebServer: @self.app.get("/api/tunables/{context_id}") async def get_tunables(context_id: str, request: Request): logger.info(f"{request.method} {request.url.path}") - if not is_valid_uuid(context_id): - logger.warning(f"Invalid context_id: {context_id}") - return JSONResponse({"error": "Invalid context_id"}, status_code=400) context = self.upsert_context(context_id) agent = context.get_agent("chat") if not agent: + logger.info("chat agent does not exist on this context!") return JSONResponse( {"error": f"chat is not recognized", "context": context.id}, status_code=404, @@ -629,7 +631,7 @@ class WebServer: { "system_prompt": agent.system_prompt, "message_history_length": context.message_history_length, - "rags": context.rags, + "rags": [ r.model_dump(mode="json") for r in context.rags ], "tools": [ { **t["function"], @@ -674,6 +676,7 @@ class WebServer: error = { "error": f"Attempt to create agent type: {agent_type} failed: {e}" } + logger.info(error) return JSONResponse(error, status_code=404) try: @@ -887,8 +890,35 @@ class WebServer: file_path = os.path.join(defines.context_dir, context_id) # Serialize the data to JSON and write to file - with open(file_path, "w") as f: - f.write(context.model_dump_json(by_alias=True)) + try: + # Check for non-serializable fields before dumping + serialization_errors = check_serializable(context) + if serialization_errors: + for error in serialization_errors: + logger.error(error) + raise ValueError("Found non-serializable fields in the model") + # Dump the model prior to opening file in case there is + # a validation error so it doesn't delete the current + # context session + json_data = context.model_dump_json(by_alias=True) 
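Note on the save_context rework here: the model is dumped to a string before the file is opened, so a ValidationError or PydanticSerializationError can no longer truncate the session already on disk. The check_serializable helper is imported from utils but its body is not part of this diff; below is a minimal sketch of the kind of scan it presumably performs, with illustrative names only, not the actual implementation:

```python
# Hypothetical sketch of a pre-flight serialization check in the spirit of
# check_serializable(); the real helper lives in src/utils/ and is not shown
# in this diff.
from typing import List

import numpy as np
from pydantic import BaseModel


def find_non_serializable(model: BaseModel, path: str = "") -> List[str]:
    """Recursively collect paths of fields that json.dumps cannot handle."""
    errors: List[str] = []
    for name, value in model.__dict__.items():
        location = f"{path}.{name}" if path else name
        if isinstance(value, np.ndarray):
            errors.append(f"{location}: numpy.ndarray is not JSON serializable")
        elif isinstance(value, BaseModel):
            errors.extend(find_non_serializable(value, location))
        elif isinstance(value, list):
            for i, item in enumerate(value):
                if isinstance(item, BaseModel):
                    errors.extend(find_non_serializable(item, f"{location}[{i}]"))
                elif isinstance(item, np.ndarray):
                    errors.append(f"{location}[{i}]: numpy.ndarray is not JSON serializable")
    return errors
```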
+ with open(file_path, "w") as f: + f.write(json_data) + except ValidationError as e: + logger.error(e) + logger.error(traceback.format_exc()) + for error in e.errors(): + print(f"Field: {error['loc'][0]}, Error: {error['msg']}") + except PydanticSerializationError as e: + logger.error(e) + logger.error(traceback.format_exc()) + logger.error(f"Serialization error: {str(e)}") + # Inspect the model to identify problematic fields + for field_name, value in context.__dict__.items(): + if isinstance(value, np.ndarray): + logger.error(f"Field '{field_name}' contains non-serializable type: {type(value)}") + except Exception as e: + logger.error(traceback.format_exc()) + logger.error(e) return context_id @@ -942,12 +972,8 @@ class WebServer: self.contexts[context_id] = context logger.info(f"Successfully loaded context {context_id}") - except json.JSONDecodeError as e: - logger.error(f"Invalid JSON in file: {e}") except Exception as e: logger.error(f"Error validating context: {str(e)}") - import traceback - logger.error(traceback.format_exc()) # Fallback to creating a new context self.contexts[context_id] = Context( @@ -985,7 +1011,7 @@ class WebServer: # context.add_agent(JobDescription(system_prompt = system_job_description)) # context.add_agent(FactCheck(system_prompt = system_fact_check)) context.tools = Tools.enabled_tools(Tools.tools) - context.rags = rags.copy() + context.rags_enabled = [ r.name for r in rags ] logger.info(f"{context.id} created and added to contexts.") self.contexts[context.id] = context diff --git a/src/tests/__pycache__/__init__.cpython-312.pyc b/src/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..f528223 Binary files /dev/null and b/src/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/src/tests/__pycache__/test-context.cpython-312.pyc b/src/tests/__pycache__/test-context.cpython-312.pyc new file mode 100644 index 0000000..590dc11 Binary files /dev/null and b/src/tests/__pycache__/test-context.cpython-312.pyc differ diff --git a/src/tests/__pycache__/test-embedding.cpython-312.pyc b/src/tests/__pycache__/test-embedding.cpython-312.pyc new file mode 100644 index 0000000..18e6fbe Binary files /dev/null and b/src/tests/__pycache__/test-embedding.cpython-312.pyc differ diff --git a/src/tests/__pycache__/test-message.cpython-312.pyc b/src/tests/__pycache__/test-message.cpython-312.pyc new file mode 100644 index 0000000..f8f3a24 Binary files /dev/null and b/src/tests/__pycache__/test-message.cpython-312.pyc differ diff --git a/src/tests/__pycache__/test-rag.cpython-312.pyc b/src/tests/__pycache__/test-rag.cpython-312.pyc new file mode 100644 index 0000000..fe1925a Binary files /dev/null and b/src/tests/__pycache__/test-rag.cpython-312.pyc differ diff --git a/src/tests/test-context.py b/src/tests/test-context.py index 150bff2..1e42db5 100644 --- a/src/tests/test-context.py +++ b/src/tests/test-context.py @@ -20,5 +20,5 @@ observer, file_watcher = Rag.start_file_watcher( ) context = Context(file_watcher=file_watcher) -data = context.model_dump(mode="json") -context = Context.from_json(json.dumps(data), file_watcher=file_watcher) +json_data = context.model_dump(mode="json") +context = Context.model_validate(json_data) diff --git a/src/tests/test-embedding.py b/src/tests/test-embedding.py new file mode 100644 index 0000000..1b84544 --- /dev/null +++ b/src/tests/test-embedding.py @@ -0,0 +1,89 @@ +# From /opt/backstory run: +# python -m src.tests.test-embedding +import numpy as np # type: ignore +import logging +import argparse +from 
ollama import Client # type: ignore +from ..utils import defines + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(message)s", +) + +def get_embedding(text: str, embedding_model: str, ollama_server: str) -> np.ndarray: + """Generate and normalize an embedding for the given text.""" + llm = Client(host=ollama_server) + + # Get embedding + try: + response = llm.embeddings(model=embedding_model, prompt=text) + embedding = np.array(response["embedding"]) + except Exception as e: + logging.error(f"Failed to get embedding: {e}") + raise + + # Log diagnostics + logging.info(f"Input text: {text}") + logging.info(f"Embedding shape: {embedding.shape}, First 5 values: {embedding[:5]}") + + # Check for invalid embeddings + if embedding.size == 0 or np.any(np.isnan(embedding)) or np.any(np.isinf(embedding)): + logging.error("Invalid embedding: contains NaN, infinite, or empty values.") + raise ValueError("Invalid embedding returned from Ollama.") + + # Check normalization + norm = np.linalg.norm(embedding) + is_normalized = np.allclose(norm, 1.0, atol=1e-3) + logging.info(f"Embedding norm: {norm}, Is normalized: {is_normalized}") + + # Normalize if needed + if not is_normalized: + embedding = embedding / norm + logging.info("Embedding normalized manually.") + + return embedding + +def main(): + """Main function to generate and normalize an embedding from command-line input.""" + parser = argparse.ArgumentParser(description="Generate embeddings for text using mxbai-embed-large.") + parser.add_argument( + "--text", + type=str, + nargs="+", # Allow multiple text inputs + default=["Test sentence."], + help="Text(s) to generate embeddings for (default: 'Test sentence.')", + ) + parser.add_argument( + "--ollama-server", + type=str, + default=defines.ollama_api_url, + help=f"Ollama server URL (default: {defines.ollama_api_url})", + ) + parser.add_argument( + "--embedding-model", + type=str, + default=defines.embedding_model, + help=f"Embedding model name (default: {defines.embedding_model})", + ) + args = parser.parse_args() + + # Validate input + for text in args.text: + if not text or not isinstance(text, str): + logging.error("Input text must be a non-empty string.") + raise ValueError("Input text must be a non-empty string.") + + # Generate embeddings for each text + embeddings = [] + for text in args.text: + embedding = get_embedding( + text=text, + embedding_model=args.embedding_model, + ollama_server=args.ollama_server, + ) + embeddings.append(embedding) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/tests/test-message.py b/src/tests/test-message.py index 6de3b5b..8ebf17a 100644 --- a/src/tests/test-message.py +++ b/src/tests/test-message.py @@ -2,10 +2,15 @@ # python -m src.tests.test-message from ..utils import logger -from ..utils import Message +from ..utils import Message, MessageMetaData +from ..utils import ChromaDBGetResponse import json prompt = "This is a test" message = Message(prompt=prompt) print(message.model_dump(mode="json")) +#message.metadata = MessageMetaData() +rag = ChromaDBGetResponse() +message.metadata.rag = rag +print(message.model_dump(mode="json")) diff --git a/src/tests/test-rag.py b/src/tests/test-rag.py new file mode 100644 index 0000000..99911e8 --- /dev/null +++ b/src/tests/test-rag.py @@ -0,0 +1,81 @@ +# From /opt/backstory run: +# python -m src.tests.test-rag +from ..utils import logger +from pydantic import BaseModel, field_validator # type: ignore +from prometheus_client 
import CollectorRegistry # type: ignore +from typing import List, Dict, Any, Optional +import ollama +import numpy as np # type: ignore +from ..utils import (rag as Rag, ChromaDBGetResponse) +from ..utils import Context +from ..utils import defines + +import json + +chroma_results = { + "ids": ["1", "2"], + "embeddings": np.array([[1.0, 2.0], [3.0, 4.0]]), + "documents": ["doc1", "doc2"], + "metadatas": [{"meta": "data1"}, {"meta": "data2"}], + "query_embedding": np.array([0.1, 0.2, 0.3]) +} + +query_embedding = np.array(chroma_results["query_embedding"]).flatten() +umap_2d = np.array([0.4, 0.5]) # Example UMAP output +umap_3d = np.array([0.6, 0.7, 0.8]) # Example UMAP output + +rag_metadata = ChromaDBGetResponse( + query="test", + query_embedding=query_embedding, + name="JPK", + ids=chroma_results.get("ids", []), + size=2 +) + +logger.info(json.dumps(rag_metadata.model_dump(mode="json"))) + +logger.info(f"Assigning type {type(umap_2d)} to rag_metadata.umap_embedding_2d") +rag_metadata.umap_embedding_2d = umap_2d + +logger.info(json.dumps(rag_metadata.model_dump(mode="json"))) + +rag = ChromaDBGetResponse() +rag.embeddings = np.array([[1.0, 2.0], [3.0, 4.0]]) +json_str = rag.model_dump(mode="json") +logger.info(json_str) +rag = ChromaDBGetResponse.model_validate(json_str) +llm = ollama.Client(host=defines.ollama_api_url) # type: ignore +prometheus_collector = CollectorRegistry() +observer, file_watcher = Rag.start_file_watcher( + llm=llm, + watch_directory=defines.doc_dir, + recreate=False, # Don't recreate if exists +) +context = Context( + file_watcher=file_watcher, + prometheus_collector=prometheus_collector, +) +skill="Codes in C++" +if context.file_watcher: + chroma_results = context.file_watcher.find_similar(query=skill, top_k=10, threshold=0.5) + if chroma_results: + query_embedding = np.array(chroma_results["query_embedding"]).flatten() + + umap_2d = context.file_watcher.umap_model_2d.transform([query_embedding])[0] + umap_3d = context.file_watcher.umap_model_3d.transform([query_embedding])[0] + + rag_metadata = ChromaDBGetResponse( + query=skill, + query_embedding=query_embedding, + name="JPK", + ids=chroma_results.get("ids", []), + embeddings=chroma_results.get("embeddings", []), + documents=chroma_results.get("documents", []), + metadatas=chroma_results.get("metadatas", []), + umap_embedding_2d=umap_2d, + umap_embedding_3d=umap_3d, + size=context.file_watcher.collection.count() + ) + +json_str = context.model_dump(mode="json") +logger.info(json_str) diff --git a/src/utils/__init__.py b/src/utils/__init__.py index bcf65e4..82797ba 100644 --- a/src/utils/__init__.py +++ b/src/utils/__init__.py @@ -1,25 +1,34 @@ from __future__ import annotations from pydantic import BaseModel # type: ignore +from typing import ( + Any, + Set +) import importlib +import json from . 
import defines from .context import Context from .conversation import Conversation -from .message import Message, Tunables -from .rag import ChromaDBFileWatcher, start_file_watcher +from .message import Message, Tunables, MessageMetaData +from .rag import ChromaDBFileWatcher, ChromaDBGetResponse, start_file_watcher from .setup_logging import setup_logging from .agents import class_registry, AnyAgent, Agent, __all__ as agents_all from .metrics import Metrics +from .check_serializable import check_serializable __all__ = [ "Agent", + "Message", "Tunables", + "MessageMetaData", "Context", "Conversation", - "Message", "Metrics", "ChromaDBFileWatcher", + 'ChromaDBGetResponse', "start_file_watcher", + "check_serializable", "logger", ] diff --git a/src/utils/agents/base.py b/src/utils/agents/base.py index 9a9dc50..f1eb06e 100644 --- a/src/utils/agents/base.py +++ b/src/utils/agents/base.py @@ -165,10 +165,9 @@ class Agent(BaseModel, ABC): if message.status != "done": yield message - if "rag" in message.metadata and message.metadata["rag"]: - for rag in message.metadata["rag"]: - for doc in rag["documents"]: - rag_context += f"{doc}\n" + for rag in message.metadata.rag: + for doc in rag.documents: + rag_context += f"{doc}\n" message.preamble = {} @@ -189,7 +188,7 @@ class Agent(BaseModel, ABC): llm: Any, model: str, message: Message, - tool_message: Any, + tool_message: Any, # llama response message messages: List[LLMMessage], ) -> AsyncGenerator[Message, None]: logger.info(f"{self.agent_type} - {inspect.stack()[0].function}") @@ -199,10 +198,10 @@ class Agent(BaseModel, ABC): if not self.context: raise ValueError("Context is not set for this agent.") - if not message.metadata["tools"]: + if not message.metadata.tools: raise ValueError("tools field not initialized") - tool_metadata = message.metadata["tools"] + tool_metadata = message.metadata.tools tool_metadata["tool_calls"] = [] message.status = "tooling" @@ -301,8 +300,7 @@ class Agent(BaseModel, ABC): model=model, messages=messages, options={ - **message.metadata["options"], - # "temperature": 0.5, + **message.metadata.options, }, stream=True, ): @@ -316,12 +314,10 @@ class Agent(BaseModel, ABC): if response.done: self.collect_metrics(response) - message.metadata["eval_count"] += response.eval_count - message.metadata["eval_duration"] += response.eval_duration - message.metadata["prompt_eval_count"] += response.prompt_eval_count - message.metadata[ - "prompt_eval_duration" - ] += response.prompt_eval_duration + message.metadata.eval_count += response.eval_count + message.metadata.eval_duration += response.eval_duration + message.metadata.prompt_eval_count += response.prompt_eval_count + message.metadata.prompt_eval_duration += response.prompt_eval_duration self.context_tokens = ( response.prompt_eval_count + response.eval_count ) @@ -329,9 +325,7 @@ class Agent(BaseModel, ABC): yield message end_time = time.perf_counter() - message.metadata["timers"][ - "llm_with_tools" - ] = f"{(end_time - start_time):.4f}" + message.metadata.timers["llm_with_tools"] = end_time - start_time return def collect_metrics(self, response): @@ -370,22 +364,22 @@ class Agent(BaseModel, ABC): LLMMessage(role="user", content=message.context_prompt.strip()) ) - # message.metadata["messages"] = messages - message.metadata["options"] = { + # message.messages = messages + message.metadata.options = { "seed": 8911, "num_ctx": self.context_size, "temperature": temperature, # Higher temperature to encourage tool usage } # Create a dict for storing various timing stats - 
message.metadata["timers"] = {} + message.metadata.timers = {} use_tools = message.tunables.enable_tools and len(self.context.tools) > 0 - message.metadata["tools"] = { + message.metadata.tools = { "available": llm_tools(self.context.tools), "used": False, } - tool_metadata = message.metadata["tools"] + tool_metadata = message.metadata.tools if use_tools: message.status = "thinking" @@ -408,17 +402,14 @@ class Agent(BaseModel, ABC): messages=tool_metadata["messages"], tools=tool_metadata["available"], options={ - **message.metadata["options"], - # "num_predict": 1024, # "Low" token limit to cut off after tool call + **message.metadata.options, }, stream=False, # No need to stream the probe ) self.collect_metrics(response) end_time = time.perf_counter() - message.metadata["timers"][ - "tool_check" - ] = f"{(end_time - start_time):.4f}" + message.metadata.timers["tool_check"] = end_time - start_time if not response.message.tool_calls: logger.info("LLM indicates tools will not be used") # The LLM will not use tools, so disable use_tools so we can stream the full response @@ -442,16 +433,14 @@ class Agent(BaseModel, ABC): messages=tool_metadata["messages"], # messages, tools=tool_metadata["available"], options={ - **message.metadata["options"], + **message.metadata.options, }, stream=False, ) self.collect_metrics(response) end_time = time.perf_counter() - message.metadata["timers"][ - "non_streaming" - ] = f"{(end_time - start_time):.4f}" + message.metadata.timers["non_streaming"] = end_time - start_time if not response: message.status = "error" @@ -475,9 +464,7 @@ class Agent(BaseModel, ABC): return yield message end_time = time.perf_counter() - message.metadata["timers"][ - "process_tool_calls" - ] = f"{(end_time - start_time):.4f}" + message.metadata.timers["process_tool_calls"] = end_time - start_time message.status = "done" return @@ -498,7 +485,7 @@ class Agent(BaseModel, ABC): model=model, messages=messages, options={ - **message.metadata["options"], + **message.metadata.options, }, stream=True, ): @@ -517,12 +504,10 @@ class Agent(BaseModel, ABC): if response.done: self.collect_metrics(response) - message.metadata["eval_count"] += response.eval_count - message.metadata["eval_duration"] += response.eval_duration - message.metadata["prompt_eval_count"] += response.prompt_eval_count - message.metadata[ - "prompt_eval_duration" - ] += response.prompt_eval_duration + message.metadata.eval_count += response.eval_count + message.metadata.eval_duration += response.eval_duration + message.metadata.prompt_eval_count += response.prompt_eval_count + message.metadata.prompt_eval_duration += response.prompt_eval_duration self.context_tokens = ( response.prompt_eval_count + response.eval_count ) @@ -530,7 +515,7 @@ class Agent(BaseModel, ABC): yield message end_time = time.perf_counter() - message.metadata["timers"]["streamed"] = f"{(end_time - start_time):.4f}" + message.metadata.timers["streamed"] = end_time - start_time return async def process_message( @@ -560,7 +545,7 @@ class Agent(BaseModel, ABC): self.context.processing = True - message.metadata["system_prompt"] = ( + message.system_prompt = ( f"<|system|>\n{self.system_prompt.strip()}\n" ) message.context_prompt = "" @@ -575,11 +560,11 @@ class Agent(BaseModel, ABC): message.status = "thinking" yield message - message.metadata["context_size"] = self.set_optimal_context_size( + message.context_size = self.set_optimal_context_size( llm, model, prompt=message.context_prompt ) - message.response = f"Processing {'RAG augmented ' if 
message.metadata['rag'] else ''}query..." + message.response = f"Processing {'RAG augmented ' if message.metadata.rag else ''}query..." message.status = "thinking" yield message diff --git a/src/utils/agents/job_description.py b/src/utils/agents/job_description.py index 4137966..a9c1687 100644 --- a/src/utils/agents/job_description.py +++ b/src/utils/agents/job_description.py @@ -19,9 +19,10 @@ import time import asyncio import numpy as np # type: ignore -from .base import Agent, agent_registry, LLMMessage -from ..message import Message -from ..setup_logging import setup_logging +from . base import Agent, agent_registry, LLMMessage +from .. message import Message +from .. rag import ChromaDBGetResponse +from .. setup_logging import setup_logging logger = setup_logging() @@ -91,8 +92,11 @@ class JobDescription(Agent): await asyncio.sleep(1) # Allow the event loop to process the write self.context.processing = True + job_description = message.preamble["job_description"] + resume = message.preamble["resume"] original_message = message.model_copy() + original_message.prompt = job_description original_message.response = "" self.conversation.add(original_message) @@ -100,11 +104,9 @@ class JobDescription(Agent): self.model = model self.metrics.generate_count.labels(agent=self.agent_type).inc() with self.metrics.generate_duration.labels(agent=self.agent_type).time(): - job_description = message.preamble["job_description"] - resume = message.preamble["resume"] try: - async for message in self.generate_factual_tailored_resume( + async for message in self.generate_resume( message=message, job_description=job_description, resume=resume ): if message.status != "done": @@ -124,7 +126,7 @@ class JobDescription(Agent): # Done processing, add message to conversation self.context.processing = False - resume_generation = message.metadata.get("resume_generation", {}) + resume_generation = message.metadata.resume_generation if not resume_generation: message.response = ( "Generation did not generate metadata necessary for processing." @@ -341,18 +343,19 @@ Name: {candidate_name} ## OUTPUT FORMAT: Provide the resume in clean markdown format, ready for the candidate to use. -## REFERENCE (Original Resume): """ +# ## REFERENCE (Original Resume): +# """ - # Add a truncated version of the original resume for reference if it's too long - max_resume_length = 25000 # Characters - if len(original_resume) > max_resume_length: - system_prompt += ( - original_resume[:max_resume_length] - + "...\n[Original resume truncated due to length]" - ) - else: - system_prompt += original_resume +# # Add a truncated version of the original resume for reference if it's too long +# max_resume_length = 25000 # Characters +# if len(original_resume) > max_resume_length: +# system_prompt += ( +# original_resume[:max_resume_length] +# + "...\n[Original resume truncated due to length]" +# ) +# else: +# system_prompt += original_resume prompt = "Create a tailored professional resume that highlights candidate's skills and experience most relevant to the job requirements. Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no commentary before or after." return system_prompt, prompt @@ -413,7 +416,7 @@ Provide the resume in clean markdown format, ready for the candidate to use. 
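The hunks around here (and the base.py ones above) replace string-keyed metadata with typed attribute access: message.metadata.options, message.metadata.timers holding raw floats, and eval counters accumulating as ints. A condensed sketch of the streaming pattern these agents share, assuming a reachable Ollama server, the typed responses of recent ollama-python, and a placeholder model name:

```python
# Sketch of the shared streaming pattern; host URL and model name are
# placeholders, the options keys mirror those used in this diff.
import time

import ollama

client = ollama.Client(host="http://localhost:11434")  # assumed server URL
options = {"seed": 8911, "num_ctx": 8192, "temperature": 0.7}

eval_count = 0
start = time.perf_counter()
for part in client.chat(
    model="llama3.2",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello."}],
    options=options,
    stream=True,
):
    print(part.message.content or "", end="", flush=True)
    if part.done:
        eval_count += part.eval_count or 0  # counters accumulate as ints
elapsed = time.perf_counter() - start      # timers now store floats, not formatted strings
print(f"\n{eval_count} tokens in {elapsed:.4f}s")
```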
yield message return - def calculate_match_statistics(self, job_requirements, skill_assessment_results): + def calculate_match_statistics(self, job_requirements, skill_assessment_results) -> dict[str, dict[str, Any]]: """ Calculate statistics about how well the candidate matches job requirements @@ -594,9 +597,10 @@ a SPECIFIC skill based solely on their resume and supporting evidence. }} ``` -## CANDIDATE RESUME: -{resume} """ +# ## CANDIDATE RESUME: +# {resume} +# """ # Add RAG content if provided if rag_content: @@ -821,7 +825,7 @@ IMPORTANT: Be factual and precise. If you cannot find strong evidence for this s LLMMessage(role="system", content=system_prompt), LLMMessage(role="user", content=prompt), ] - message.metadata["options"] = { + message.metadata.options = { "seed": 8911, "num_ctx": self.context_size, "temperature": temperature, # Higher temperature to encourage tool usage @@ -837,7 +841,7 @@ IMPORTANT: Be factual and precise. If you cannot find strong evidence for this s model=self.model, messages=messages, options={ - **message.metadata["options"], + **message.metadata.options, }, stream=True, ): @@ -860,54 +864,40 @@ IMPORTANT: Be factual and precise. If you cannot find strong evidence for this s if response.done: self.collect_metrics(response) - message.metadata["eval_count"] += response.eval_count - message.metadata["eval_duration"] += response.eval_duration - message.metadata["prompt_eval_count"] += response.prompt_eval_count - message.metadata[ - "prompt_eval_duration" - ] += response.prompt_eval_duration + message.metadata.eval_count += response.eval_count + message.metadata.eval_duration += response.eval_duration + message.metadata.prompt_eval_count += response.prompt_eval_count + message.metadata.prompt_eval_duration += response.prompt_eval_duration self.context_tokens = response.prompt_eval_count + response.eval_count message.chunk = "" message.status = "done" yield message - def rag_function(self, skill: str) -> tuple[str, list[Any]]: + def retrieve_rag_content(self, skill: str) -> tuple[str, ChromaDBGetResponse]: if self.context is None or self.context.file_watcher is None: raise ValueError("self.context or self.context.file_watcher is None") try: rag_results = "" - all_metadata = [] - chroma_results = self.context.file_watcher.find_similar( - query=skill, top_k=5, threshold=0.5 - ) + rag_metadata = ChromaDBGetResponse() + chroma_results = self.context.file_watcher.find_similar(query=skill, top_k=10, threshold=0.5) if chroma_results: - chroma_embedding = np.array( - chroma_results["query_embedding"] - ).flatten() # Ensure correct shape - print(f"Chroma embedding shape: {chroma_embedding.shape}") + query_embedding = np.array(chroma_results["query_embedding"]).flatten() - umap_2d = self.context.file_watcher.umap_model_2d.transform( - [chroma_embedding] - )[0].tolist() - print( - f"UMAP 2D output: {umap_2d}, length: {len(umap_2d)}" - ) # Debug output + umap_2d = self.context.file_watcher.umap_model_2d.transform([query_embedding])[0] + umap_3d = self.context.file_watcher.umap_model_3d.transform([query_embedding])[0] - umap_3d = self.context.file_watcher.umap_model_3d.transform( - [chroma_embedding] - )[0].tolist() - print( - f"UMAP 3D output: {umap_3d}, length: {len(umap_3d)}" - ) # Debug output - - all_metadata.append( - { - "name": "JPK", - **chroma_results, - "umap_embedding_2d": umap_2d, - "umap_embedding_3d": umap_3d, - } + rag_metadata = ChromaDBGetResponse( + query=skill, + query_embedding=query_embedding.tolist(), + name="JPK", + ids=chroma_results.get("ids", 
[]), + embeddings=chroma_results.get("embeddings", []), + documents=chroma_results.get("documents", []), + metadatas=chroma_results.get("metadatas", []), + umap_embedding_2d=umap_2d.tolist(), + umap_embedding_3d=umap_3d.tolist(), + size=self.context.file_watcher.collection.count() ) for index, metadata in enumerate(chroma_results["metadatas"]): @@ -919,18 +909,19 @@ IMPORTANT: Be factual and precise. If you cannot find strong evidence for this s ] ).strip() rag_results += f""" -Source: {metadata.get("doc_type", "unknown")}: {metadata.get("path", "")} lines {metadata.get("line_begin", 0)}-{metadata.get("line_end", 0)} +Source: {metadata.get("doc_type", "unknown")}: {metadata.get("path", "")} +Document reference: {chroma_results["ids"][index]} Content: { content } """ - return rag_results, all_metadata + return rag_results, rag_metadata except Exception as e: - logger.error(e) logger.error(traceback.format_exc()) + logger.error(e) exit(0) - async def generate_factual_tailored_resume( + async def generate_resume( self, message: Message, job_description: str, resume: str ) -> AsyncGenerator[Message, None]: """ @@ -947,12 +938,8 @@ Content: { content } if self.context is None: raise ValueError(f"context is None in {self.agent_type}") - message.status = "thinking" - logger.info(message.response) - yield message - - message.metadata["resume_generation"] = {} - metadata = message.metadata["resume_generation"] + message.metadata.resume_generation = {} + metadata = message.metadata.resume_generation # Stage 1A: Analyze job requirements streaming_message = Message(prompt="Analyze job requirements") streaming_message.status = "thinking" @@ -975,8 +962,8 @@ Content: { content } prompts = self.process_job_requirements( job_requirements=job_requirements, resume=resume, - rag_function=self.rag_function, - ) # , retrieve_rag_content) + rag_function=self.retrieve_rag_content, + ) # UI should persist this state of the message partial = message.model_copy() @@ -1051,9 +1038,15 @@ Content: { content } partial.title = ( f"Skill {index}/{total_prompts}: {description} [{match_level}]" ) - partial.metadata["rag"] = rag + partial.metadata.rag = [rag] # Front-end expects a list of RAG retrievals if skill_description: - partial.response += f"\n\n{skill_description}" + partial.response = f""" +```json +{json.dumps(skill_assessment_results[skill_name]["skill_assessment"])} +``` + +{skill_description} +""" yield partial self.conversation.add(partial) diff --git a/src/utils/context.py b/src/utils/context.py index 21651be..88fb3ce 100644 --- a/src/utils/context.py +++ b/src/utils/context.py @@ -7,9 +7,10 @@ import numpy as np # type: ignore import logging from uuid import uuid4 from prometheus_client import CollectorRegistry, Counter # type: ignore +import traceback from .message import Message, Tunables -from .rag import ChromaDBFileWatcher +from .rag import ChromaDBFileWatcher, ChromaDBGetResponse from . import defines from . import tools as Tools from .agents import AnyAgent @@ -35,7 +36,7 @@ class Context(BaseModel): user_job_description: Optional[str] = None user_facts: Optional[str] = None tools: List[dict] = Tools.enabled_tools(Tools.tools) - rags: List[dict] = [] + rags: List[ChromaDBGetResponse] = [] message_history_length: int = 5 # Class managed fields agents: List[Annotated[Union[*Agent.__subclasses__()], Field(discriminator="agent_type")]] = Field( # type: ignore @@ -82,56 +83,40 @@ class Context(BaseModel): if not self.file_watcher: message.response = "No RAG context available." 
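retrieve_rag_content above and Context.generate_rag_results below now build the same typed ChromaDBGetResponse, projecting the query embedding through the pre-fitted UMAP models. A self-contained sketch of that projection step, with random vectors standing in for the real ChromaDB collection (umap-learn assumed installed):

```python
# Standalone sketch of the UMAP projection used above: fit on the corpus
# embeddings once, then transform each query embedding to 2D for plotting.
import numpy as np
import umap  # umap-learn

corpus = np.random.rand(100, 384).astype(np.float32)  # stand-in for stored embeddings
query = np.random.rand(384).astype(np.float32)        # stand-in for query_embedding

model_2d = umap.UMAP(
    n_components=2, random_state=8911, metric="cosine", n_neighbors=30, min_dist=0.1
)
model_2d.fit(corpus)

# transform() expects a batch; index [0] recovers the single projected point
point_2d = model_2d.transform([query])[0]
print(point_2d.tolist())  # plain floats, safe for umap_embedding_2d
```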
- del message.metadata["rag"] message.status = "done" yield message return - message.metadata["rag"] = [] for rag in self.rags: - if not rag["enabled"]: + if not rag.enabled: continue - message.response = f"Checking RAG context {rag['name']}..." + message.response = f"Checking RAG context {rag.name}..." yield message chroma_results = self.file_watcher.find_similar( query=message.prompt, top_k=top_k, threshold=threshold ) if chroma_results: - entries += len(chroma_results["documents"]) + query_embedding = np.array(chroma_results["query_embedding"]).flatten() - chroma_embedding = np.array( - chroma_results["query_embedding"] - ).flatten() # Ensure correct shape - print(f"Chroma embedding shape: {chroma_embedding.shape}") + umap_2d = self.file_watcher.umap_model_2d.transform([query_embedding])[0] + umap_3d = self.file_watcher.umap_model_3d.transform([query_embedding])[0] - umap_2d = self.file_watcher.umap_model_2d.transform( - [chroma_embedding] - )[0].tolist() - print( - f"UMAP 2D output: {umap_2d}, length: {len(umap_2d)}" - ) # Debug output - - umap_3d = self.file_watcher.umap_model_3d.transform( - [chroma_embedding] - )[0].tolist() - print( - f"UMAP 3D output: {umap_3d}, length: {len(umap_3d)}" - ) # Debug output - - message.metadata["rag"].append( - { - "name": rag["name"], - **chroma_results, - "umap_embedding_2d": umap_2d, - "umap_embedding_3d": umap_3d, - "size": self.file_watcher.collection.count() - } + rag_metadata = ChromaDBGetResponse( + query=message.prompt, + query_embedding=query_embedding.tolist(), + name=rag.name, + ids=chroma_results.get("ids", []), + embeddings=chroma_results.get("embeddings", []), + documents=chroma_results.get("documents", []), + metadatas=chroma_results.get("metadatas", []), + umap_embedding_2d=umap_2d.tolist(), + umap_embedding_3d=umap_3d.tolist(), + size=self.file_watcher.collection.count() ) - message.response = f"Results from {rag['name']} RAG: {len(chroma_results['documents'])} results." - yield message - if entries == 0: - del message.metadata["rag"] + message.metadata.rag.append(rag_metadata) + message.response = f"Results from {rag.name} RAG: {len(chroma_results['documents'])} results." + yield message message.response = ( f"RAG context gathered from results from {entries} documents." @@ -142,7 +127,8 @@ class Context(BaseModel): except Exception as e: message.status = "error" message.response = f"Error generating RAG results: {str(e)}" - logger.error(e) + logger.error(traceback.format_exc()) + logger.error(message.response) yield message return diff --git a/src/utils/message.py b/src/utils/message.py index 507d2e2..ed5e3a9 100644 --- a/src/utils/message.py +++ b/src/utils/message.py @@ -1,12 +1,28 @@ from pydantic import BaseModel, Field # type: ignore -from typing import Dict, List, Optional, Any +from typing import Dict, List, Optional, Any, Union, Mapping from datetime import datetime, timezone +from . 
rag import ChromaDBGetResponse +from ollama._types import Options # type: ignore class Tunables(BaseModel): - enable_rag: bool = Field(default=True) # Enable RAG collection chromadb matching - enable_tools: bool = Field(default=True) # Enable LLM to use tools - enable_context: bool = Field(default=True) # Add <|context|> field to message + enable_rag: bool = True # Enable RAG collection chromadb matching + enable_tools: bool = True # Enable LLM to use tools + enable_context: bool = True # Add <|context|> field to message +class MessageMetaData(BaseModel): + rag: List[ChromaDBGetResponse] = Field(default_factory=list) + eval_count: int = 0 + eval_duration: int = 0 + prompt_eval_count: int = 0 + prompt_eval_duration: int = 0 + context_size: int = 0 + resume_generation: Optional[Dict[str, Any]] = None + options: Optional[Union[Mapping[str, Any], Options]] = None + tools: Optional[Dict[str, Any]] = None + timers: Optional[Dict[str, float]] = None + + #resume : str = "" + #match_stats: Optional[Dict[str, Dict[str, Any]]] = Field(default=None) class Message(BaseModel): model_config = {"arbitrary_types_allowed": True} # Allow Event @@ -18,35 +34,21 @@ # Generated while processing message status: str = "" # Status of the message - preamble: dict[str, str] = {} # Preamble to be prepended to the prompt + preamble: Dict[str, Any] = Field(default_factory=dict) # Preamble to be prepended to the prompt system_prompt: str = "" # System prompt provided to the LLM context_prompt: str = "" # Full content of the message (preamble + prompt) response: str = "" # LLM response to the preamble + query - metadata: Dict[str, Any] = Field( - default_factory=lambda: { - "rag": [], - "eval_count": 0, - "eval_duration": 0, - "prompt_eval_count": 0, - "prompt_eval_duration": 0, - "context_size": 0, - } - ) + metadata: MessageMetaData = Field(default_factory=MessageMetaData) network_packets: int = 0 # Total number of streaming packets network_bytes: int = 0 # Total bytes sent while streaming packets actions: List[str] = ( [] ) # Other session modifying actions performed while processing the message - timestamp: datetime = datetime.now(timezone.utc) - chunk: str = Field( - default="" - ) # This needs to be serialized so it will be sent in responses - partial_response: str = Field( - default="" - ) # This needs to be serialized so it will be sent in responses on timeout - title: str = Field( - default="" - ) # This needs to be serialized so it will be sent in responses on timeout + timestamp: str = Field(default_factory=lambda: str(datetime.now(timezone.utc))) # per-message timestamp; a bare default is evaluated once at import + chunk: str = "" + partial_response: str = "" + title: str = "" + context_size: int = 0 def add_action(self, action: str | list[str]) -> None: """Add a actions(s) to the message.""" diff --git a/src/utils/rag.py b/src/utils/rag.py index f3a899d..51077bc 100644 --- a/src/utils/rag.py +++ b/src/utils/rag.py @@ -1,5 +1,5 @@ -from pydantic import BaseModel # type: ignore -from typing import List, Optional, Dict, Any +from pydantic import BaseModel, field_serializer, field_validator, model_validator, Field # type: ignore +from typing import List, Optional, Dict, Any, Union import os import glob from pathlib import Path @@ -7,15 +7,9 @@ import time import hashlib import asyncio import logging -import os -import glob -import time -import hashlib -import asyncio import json import numpy as np # type: ignore import traceback -import os import chromadb import ollama @@ -38,19 +32,51 @@ else: # When imported as a module, use relative imports from .
import defines -__all__ = ["ChromaDBFileWatcher", "start_file_watcher"] +__all__ = ["ChromaDBFileWatcher", "start_file_watcher", "ChromaDBGetResponse"] DEFAULT_CHUNK_SIZE = 750 DEFAULT_CHUNK_OVERLAP = 100 - class ChromaDBGetResponse(BaseModel): - ids: List[str] - embeddings: Optional[List[List[float]]] = None - documents: Optional[List[str]] = None - metadatas: Optional[List[Dict[str, Any]]] = None - + name: str = "" + size: int = 0 + ids: List[str] = [] + embeddings: List[List[float]] = Field(default=[]) + documents: List[str] = [] + metadatas: List[Dict[str, Any]] = [] + query: str = "" + query_embedding: Optional[List[float]] = Field(default=None) + umap_embedding_2d: Optional[List[float]] = Field(default=None) + umap_embedding_3d: Optional[List[float]] = Field(default=None) + enabled: bool = True + + class Config: + validate_assignment = True + @field_validator("embeddings", "query_embedding", "umap_embedding_2d", "umap_embedding_3d") + @classmethod + def validate_embeddings(cls, value, field): + logging.info(f"Validating {field.field_name} with value: {type(value)} - {value}") + if value is None: + return value + if isinstance(value, np.ndarray): + if field.field_name == "embeddings": + if value.ndim != 2: + raise ValueError(f"{field.field_name} must be a 2-dimensional NumPy array") + return [[float(x) for x in row] for row in value.tolist()] + else: + if value.ndim != 1: + raise ValueError(f"{field.field_name} must be a 1-dimensional NumPy array") + return [float(x) for x in value.tolist()] + if field.field_name == "embeddings": + if not all(isinstance(sublist, list) and all(isinstance(x, (int, float)) for x in sublist) for sublist in value): + raise ValueError(f"{field.field_name} must be a list of lists of floats") + return [[float(x) for x in sublist] for sublist in value] + else: + if not isinstance(value, list) or not all(isinstance(x, (int, float)) for x in value): + raise ValueError(f"{field.field_name} must be a list of floats") + return [float(x) for x in value] + class ChromaDBFileWatcher(FileSystemEventHandler): def __init__( self, @@ -323,13 +349,13 @@ class ChromaDBFileWatcher(FileSystemEventHandler): n_components=2, random_state=8911, metric="cosine", - n_neighbors=15, + n_neighbors=30, min_dist=0.1, ) self._umap_embedding_2d = self._umap_model_2d.fit_transform(vectors) - logging.info( - f"2D UMAP model n_components: {self._umap_model_2d.n_components}" - ) # Should be 2 + # logging.info( + # f"2D UMAP model n_components: {self._umap_model_2d.n_components}" + # ) # Should be 2 logging.info( f"Updating 3D UMAP for {len(self._umap_collection['embeddings'])} vectors" @@ -338,13 +364,13 @@ n_components=3, random_state=8911, metric="cosine", - n_neighbors=15, - min_dist=0.1, + n_neighbors=30, + min_dist=0.01, ) self._umap_embedding_3d = self._umap_model_3d.fit_transform(vectors) - logging.info( - f"3D UMAP model n_components: {self._umap_model_3d.n_components}" - ) # Should be 3 + # logging.info( + # f"3D UMAP model n_components: {self._umap_model_3d.n_components}" + # ) # Should be 3 def _get_vector_collection(self, recreate=False) -> Collection: """Get or create a ChromaDB collection.""" @@ -380,14 +406,36 @@ """Split documents into chunks using the text splitter.""" return self.text_splitter.split_documents(docs) - def get_embedding(self, text, normalize=True): - """Generate embeddings using Ollama.""" - response = self.llm.embeddings(model=defines.embedding_model, prompt=text)
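The validators on ChromaDBGetResponse above, combined with validate_assignment, coerce NumPy arrays into plain lists of floats at assignment time, so model_dump(mode="json") round-trips without custom serializers. A short usage sketch; the import path is an assumption based on the package layout shown in this diff:

```python
# Round-trip sketch for ChromaDBGetResponse as defined above; the import
# path assumes the src/utils/rag.py layout from this diff.
import numpy as np
from utils.rag import ChromaDBGetResponse

rag = ChromaDBGetResponse(name="JPK")
rag.embeddings = np.array([[1.0, 2.0], [3.0, 4.0]])  # coerced on assignment
rag.query_embedding = np.array([0.1, 0.2, 0.3])      # 1-D arrays become List[float]

data = rag.model_dump(mode="json")                   # plain dict, JSON-safe
restored = ChromaDBGetResponse.model_validate(data)
assert restored.embeddings == [[1.0, 2.0], [3.0, 4.0]]
```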
- embedding = response["embedding"] + def get_embedding(self, text: str) -> np.ndarray: + """Generate and normalize an embedding for the given text.""" + + # Get embedding + try: + response = self.llm.embeddings(model=defines.embedding_model, prompt=text) + embedding = np.array(response["embedding"]) + except Exception as e: + logging.error(f"Failed to get embedding: {e}") + raise + + # Log diagnostics + logging.info(f"Input text: {text}") + logging.info(f"Embedding shape: {embedding.shape}, First 5 values: {embedding[:5]}") + + # Check for invalid embeddings + if embedding.size == 0 or np.any(np.isnan(embedding)) or np.any(np.isinf(embedding)): + logging.error("Invalid embedding: contains NaN, infinite, or empty values.") + raise ValueError("Invalid embedding returned from Ollama.") + + # Check normalization + norm = np.linalg.norm(embedding) + is_normalized = np.allclose(norm, 1.0, atol=1e-3) + logging.info(f"Embedding norm: {norm}, Is normalized: {is_normalized}") + + # Normalize if needed + if not is_normalized: + embedding = embedding / norm + logging.info("Embedding normalized manually.") - if normalize: - normalized = self._normalize_embeddings(embedding) - return normalized return embedding def add_embeddings_to_collection(self, chunks: List[Chunk]):
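One consequence of the rewritten get_embedding above: because every embedding is normalized to unit length (and NaN, infinite, or empty values are rejected), cosine similarity between any two embeddings reduces to a plain dot product, which is the usual reason to normalize before indexing. A quick numeric illustration:

```python
# With unit-length embeddings (as get_embedding now enforces), cosine
# similarity is just a dot product; no norms in the denominator.
import numpy as np

a = np.array([3.0, 4.0])
b = np.array([4.0, 3.0])

a = a / np.linalg.norm(a)  # -> [0.6, 0.8]
b = b / np.linalg.norm(b)  # -> [0.8, 0.6]

assert np.allclose(np.linalg.norm(a), 1.0, atol=1e-3)  # same tolerance as the diff
cosine = float(np.dot(a, b))
print(cosine)  # 0.96
```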