Just checking in

parent ba124c1673
commit b3630ebba4
@@ -50,6 +50,15 @@ div {
 margin: 0 auto;
 }
 
+
+.DocBox {
+display: flex;
+flex-direction: column;
+flex-grow: 1;
+max-width: 2048px;
+margin: 0 auto;
+}
+
 .Controls {
 display: flex;
 background-color: #F5F5F5;
@@ -47,9 +47,7 @@ import '@fontsource/roboto/700.css';
 const welcomeMarkdown = `
 # Welcome to Backstory
 
-Backstory was written by James Ketrenos in order to provide answers to questions potential employers may have about his work history. In addition to being a RAG enabled expert system, the LLM has access to real-time data.
-
-You can ask things like:
+Backstory was written by James Ketrenos in order to provide answers to questions potential employers may have about his work history. In addition to being a RAG enabled expert system, the LLM has access to real-time data. You can ask things like:
 
 <ChatQuery text="What is James Ketrenos' work history?"/>
 <ChatQuery text="What programming languages has James used?"/>
@@ -136,7 +136,7 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({generateResume, resume,
 value={jobDescription}
 onChange={(e) => setJobDescription(e.target.value)}
 onKeyDown={handleKeyPress}
-placeholder="Enter a job description, then click Generate..."
+placeholder="Paste a job description (or URL that resolves to one), then click Generate..."
 />
 </Document>
 <Button onClick={(e: any) => { triggerGeneration(jobDescription); }}>Generate</Button>
@@ -160,9 +160,9 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({generateResume, resume,
 <Typography>Generating resume...</Typography>
 </Box>
 </>}
-<Card sx={{ display: "flex", flexGrow: 1, overflow: "auto", minHeight: "fit-content", p: 1 }}>
-{resume !== undefined
-? <Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If this resume seems too good to be true, expand the <b>LLM information for this query</b> section and click the links to the relevant RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography>
+<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1 }}>
+{resume !== undefined || processing == true
+? <Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relevant RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography>
 : <Typography>Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.</Typography>
 }
 </Card>
@@ -193,7 +193,7 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({generateResume, resume,
 value={jobDescription}
 onChange={(e) => setJobDescription(e.target.value)}
 onKeyDown={handleKeyPress}
-placeholder="Enter a job description, then click Generate..."
+placeholder="Paste a job description (or URL that resolves to one), then click Generate..."
 />
 </Document>
 <Tooltip title="Generate">
@@ -217,7 +217,12 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({generateResume, resume,
 data-testid="loader"
 />
 </Box>
-{resume !== undefined && <Card sx={{ display: "flex", flexGrow: 1, overflow: "auto", minHeight: "fit-content", p: 1 }}><Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If this resume seems too good to be true, expand the <b>LLM information for this query</b> section and click the links to the relevant RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography></Card>}
+<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1 }}>
+{resume !== undefined || processing == true
+? <Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links to the relevant RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography>
+: <Typography>Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.</Typography>
+}
+</Card>
 </Box>
 </Box>
 
@@ -78,7 +78,7 @@ const MessageMeta = ({ metadata }: MessageMetaInterface) => {
 </TableContainer>
 {
 metadata.tools !== undefined && metadata.tools.length !== 0 &&
-<Accordion>
+<Accordion sx={{ boxSizing: "border-box" }}>
 <AccordionSummary expandIcon={<ExpandMoreIcon />}>
 <Box sx={{ fontSize: "0.8rem" }}>
 Tools queried
@@ -88,10 +88,10 @@ const MessageMeta = ({ metadata }: MessageMetaInterface) => {
 {metadata.tools.map((tool: any, index: number) => <Box key={index}>
 {index !== 0 && <Divider />}
 <Box sx={{ fontSize: "0.75rem", display: "flex", flexDirection: "column", mt: 0.5 }}>
-<div style={{ display: "flex", paddingRight: "1rem", minWidth: "10rem", whiteSpace: "nowrap" }}>
+<div style={{ display: "flex", paddingRight: "1rem", whiteSpace: "nowrap" }}>
 {tool.tool}
 </div>
-<div style={{ display: "flex", padding: "3px", whiteSpace: "pre-wrap", flexGrow: 1, border: "1px solid #E0E0E0", maxHeight: "5rem", overflow: "auto" }}>{JSON.stringify(tool.result, null, 2)}</div>
+<div style={{ display: "flex", padding: "3px", whiteSpace: "pre-wrap", flexGrow: 1, border: "1px solid #E0E0E0", wordBreak: "break-all", maxHeight: "5rem", overflow: "auto" }}>{JSON.stringify(tool.result, null, 2)}</div>
 </Box>
 </Box>)}
 </AccordionDetails>
@@ -203,7 +203,7 @@ const ResumeBuilder = ({scrollToBottom, isScrolledToBottom, setProcessing, proce
 };
 
 return (
-<Box className="ChatBox">
+<Box className="DocBox">
 <Box className="Conversation">
 <DocumentViewer sx={{
 display: "flex",
@@ -215,51 +215,6 @@ const ResumeBuilder = ({scrollToBottom, isScrolledToBottom, setProcessing, proce
 </Box>
 </Box>
 );
-return (<Box className="ChatBox">
-<Box className="Conversation">
-<TextField
-variant="outlined"
-fullWidth
-multiline
-rows="10"
-type="text"
-value={jobDescription}
-onChange={(e) => setJobDescription(e.target.value)}
-onKeyDown={handleKeyPress}
-placeholder="Enter the job description.."
-id="JobDescriptionInput"
-/>
-<Tooltip title="Generate">
-<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { generateResume(jobDescription); }}>Generate<SendIcon /></Button>
-</Tooltip>
-<Box sx={{
-display: "flex",
-flexDirection: "column",
-alignItems: "center",
-justifyContent: "center",
-mb: 1
-}}>
-<PropagateLoader
-size="10px"
-loading={processing}
-aria-label="Loading Spinner"
-data-testid="loader"
-/>
-{processing === true && countdown > 0 && (
-<Box
-sx={{
-pt: 1,
-fontSize: "0.7rem",
-color: "darkgrey"
-}}
->Estimated response time: {countdown}s</Box>
-)}
-
-{generateStatus && <Message isFullWidth={true} message={generateStatus} />}
-{/* {resume && <Message isFullWidth={true} message={resume} />} */}
-</Box>
-</Box>
-</Box>);
 }
 
 
@@ -137,7 +137,7 @@ When answering queries, follow these steps:
 1. First analyze the query to determine if real-time information might be helpful
 2. Even when [{context_tag}] is provided, consider whether the tools would provide more current or comprehensive information
 3. Use the provided tools whenever they would enhance your response, regardless of whether context is also available
-4. When presenting information like weather forecasts, include relevant emojis immediately before the corresponding text. For example, for a sunny day, say \"☀️ Sunny\" or if the forecast says there will be \"rain showers\", say \"🌧️ Rain showers\". Use this mapping for weather emojis: Sunny: ☀️, Cloudy: ☁️, Rainy: 🌧️, Snowy: ❄️
+4. When presenting weather forecasts, include relevant emojis immediately before the corresponding text. For example, for a sunny day, say \"☀️ Sunny\" or if the forecast says there will be \"rain showers\", say \"🌧️ Rain showers\". Use this mapping for weather emojis: Sunny: ☀️, Cloudy: ☁️, Rainy: 🌧️, Snowy: ❄️
 5. When both [{context_tag}] and tool outputs are relevant, synthesize information from both sources to provide the most complete answer
 6. Always prioritize the most up-to-date and relevant information, whether it comes from [{context_tag}] or tools
 7. If [{context_tag}] and tool outputs contain conflicting information, prefer the tool outputs as they likely represent more current data
@@ -273,6 +273,9 @@ def is_valid_uuid(value):
 def default_tools(tools):
     return [{**tool, "enabled": True} for tool in tools]
 
+def find_summarize_tool(tools):
+    return [{**tool, "enabled": True} for tool in tools if tool.get("name", "") == "AnalyzeSite"]
+
 def llm_tools(tools):
     return [tool for tool in tools if tool.get("enabled", False) == True]
 
@@ -846,6 +849,13 @@ class WebServer:
 yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size}
 
 # Use the async generator in an async for loop
+#
+# To support URL lookup:
+#
+# 1. Enable tools in a call to chat() with a simple prompt to invoke the tool to generate the summary if requested.
+# 2. If not requested (no tool call), abort the path.
+# 3. Otherwise, we know the URL was good and can use that URL's fetched content as context.
+#
 response = self.client.generate(model=self.model, system=system_generate_resume, prompt=content, options={ 'num_ctx': ctx_size })
 metadata["eval_count"] += response['eval_count']
 metadata["eval_duration"] += response['eval_duration']
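The comment block added in this last hunk outlines a URL-lookup flow that the commit does not yet implement (the code still calls generate() directly). A rough sketch of that flow, assuming the dict-style responses the ollama client returns elsewhere in this file and the find_summarize_tool()/llm_tools() helpers added above; `tools` and run_tool() are hypothetical placeholders, not part of this commit:

# Illustration only: the planned URL-lookup path described in the comment above.
# `tools` stands in for the server's configured tool list; run_tool() is a
# hypothetical helper that executes the returned tool call and fetches the page.
summarize_tools = llm_tools(find_summarize_tool(tools))
probe = self.client.chat(
    model=self.model,
    messages=[{"role": "user", "content": f"Summarize the content at: {content}"}],
    tools=summarize_tools,
    options={"num_ctx": ctx_size},
)
tool_calls = probe["message"].get("tool_calls") or []
if not tool_calls:
    # 2. No tool call was made: the input did not look like a usable URL, so
    #    abort this path and treat `content` itself as the job description.
    prompt = content
else:
    # 3. The model requested AnalyzeSite, so the URL resolved; use the fetched
    #    page text as the context for resume generation.
    prompt = run_tool(tool_calls[0])
response = self.client.generate(model=self.model, system=system_generate_resume, prompt=prompt, options={"num_ctx": ctx_size})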