
Commit e3c3421

Merge branch 'main' into pre-commit-ci-update-config
gaya3-zipstack authored Jul 31, 2024
2 parents 5365887 + eb64aab commit e3c3421
Showing 6 changed files with 53 additions and 54 deletions.
13 changes: 13 additions & 0 deletions backend/pipeline/serializers/crud.py
@@ -10,6 +10,7 @@
from pipeline.models import Pipeline
from scheduler.helper import SchedulerHelper
from utils.serializer_utils import SerializerUtils
from workflow_manager.endpoint.models import WorkflowEndpoint

from backend.serializers import AuditSerializer
from unstract.connectors.connectorkit import Connectorkit
@@ -73,6 +74,18 @@ def _add_connector_data(
connector_id=instance.connector_id,
)
)
if repr[PC.DESTINATION_NAME] == PC.NOT_CONFIGURED:
    try:
        check_manual_review = WorkflowEndpoint.objects.get(
            workflow=instance.workflow,
            endpoint_type=WorkflowEndpoint.EndpointType.DESTINATION,
            connection_type=WorkflowEndpoint.ConnectionType.MANUALREVIEW,
        )
        if check_manual_review:
            repr[PC.DESTINATION_NAME] = "Manual Review"
    except Exception as ex:
        logger.debug(f"Not a Manual review destination: {ex}")

return repr

def to_representation(self, instance: Pipeline) -> OrderedDict[str, Any]:
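The branch added to _add_connector_data() above relabels a destination that is still reported as not configured whenever the workflow's DESTINATION endpoint uses the MANUALREVIEW connection type, relying on the broad except to absorb the not-found case. Below is a minimal sketch of the same check written with filter().exists(); it is an illustrative alternative that assumes the WorkflowEndpoint model and PC constants already imported in crud.py, not code from this commit:

# Hypothetical sketch -- not part of this commit. Assumes the WorkflowEndpoint
# model and the PC constants already imported in crud.py.
def resolve_destination_name(repr, instance):
    if repr[PC.DESTINATION_NAME] != PC.NOT_CONFIGURED:
        return repr
    is_manual_review = WorkflowEndpoint.objects.filter(
        workflow=instance.workflow,
        endpoint_type=WorkflowEndpoint.EndpointType.DESTINATION,
        connection_type=WorkflowEndpoint.ConnectionType.MANUALREVIEW,
    ).exists()  # returns a bool, so no exception handling is needed
    if is_manual_review:
        repr[PC.DESTINATION_NAME] = "Manual Review"
    return repr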
5 changes: 4 additions & 1 deletion docker/scripts/merge_env.py
@@ -10,7 +10,10 @@
#
import sys

PREFERRED_BASE_ENV_KEYS = []
PREFERRED_BASE_ENV_KEYS = [
    "STRUCTURE_TOOL_IMAGE_URL",
    "STRUCTURE_TOOL_IMAGE_TAG",
]
DEFAULT_AUTH_KEY = "unstract"
DEFAULT_ADMIN_KEY = "admin"
SET_DEFAULT_KEYS = {
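For reference, a minimal sketch of how a preferred-base-keys list like the one added above is typically honored while merging env files: for the listed keys the value from the base (sample) env wins, while the target env wins everywhere else. The merge_envs helper below is hypothetical and only illustrates the idea; it is not the actual merge logic in merge_env.py.

def merge_envs(
    base_env: dict, target_env: dict, preferred_base_keys: list[str]
) -> dict:
    # Hypothetical helper -- merge_env.py's real merge logic may differ.
    merged = {**base_env, **target_env}  # target values override base by default
    for key in preferred_base_keys:
        if key in base_env:
            merged[key] = base_env[key]  # base value wins for preferred keys
    return merged

# With the keys added above, STRUCTURE_TOOL_IMAGE_URL and STRUCTURE_TOOL_IMAGE_TAG
# from the base sample env would replace stale values carried in an existing env.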
@@ -82,7 +82,7 @@ function OutputForDocModal({
}, [open, singlePassExtractMode, isSinglePassExtractLoading]);

useEffect(() => {
updatePromptOutput();
updatePromptOutput(docOutputs);
}, [docOutputs]);

useEffect(() => {
@@ -118,7 +118,7 @@

const updatePromptOutput = (data) => {
setPromptOutputs((prev) => {
const updatedPromptOutput = getUpdatedPromptOutput(data, prev);
const updatedPromptOutput = data || [...prev];
const keys = Object.keys(docOutputs);

keys.forEach((key) => {
@@ -130,10 +130,6 @@
});
};

const getUpdatedPromptOutput = (data, prev) => {
return data || [...prev];
};

const updatePromptOutputInstance = (updatedPromptOutput, docId, key) => {
const index = findPromptOutputIndex(updatedPromptOutput, docId);
const promptOutputInstance = createOrUpdatePromptOutputInstance(
@@ -221,10 +217,17 @@
const rowsData = [];
const docs = moveSelectedDocToTop();
docs.forEach((item) => {
const output = data.find(
(outputValue) => outputValue?.document_manager === item?.document_id
);
const key = `${output?.prompt_id}__${output?.document_manager}__${output?.profile_manager}`;
const output = data.find((outputValue) => {
const docId =
outputValue?.document_manager ||
(outputValue?.key && getDocIdFromKey(outputValue?.key)) ||
null;
return docId === item?.document_id;
});
const key = `${promptId}__${item?.document_id}__${
selectedProfile || profileManagerId
}`;

let status = outputStatus.fail;
let message = displayPromptResult(output?.output, true);

@@ -318,7 +318,7 @@ function PromptCard({

if (runAllLLM) {
let selectedProfiles = llmProfiles;
if (!coverAllDoc && selectedLlmProfiles?.length > 0) {
if (selectedLlmProfiles?.length) {
selectedProfiles = llmProfiles.filter((profile) =>
selectedLlmProfiles.includes(profile?.profile_id)
);
@@ -95,21 +95,31 @@ const EtlTaskDeploy = ({
return null;
});
};

const getWorkflows = () => {
const connectorType = type === "task" ? "FILESYSTEM" : "DATABASE";
const fetchWorkflows = (type) =>
workflowApiService
.getWorkflowEndpointList("DESTINATION", connectorType)
.then((res) => {
const updatedData = res?.data.map((record) => ({
.getWorkflowEndpointList("DESTINATION", type)
.then((res) =>
res?.data.map((record) => ({
...record,
id: record.workflow,
}));
setWorkflowList(updatedData);
})
}))
)
.catch(() => {
console.error("Unable to get workflow list");
return [];
});
const getWorkflows = () => {
const connectorType = type === "task" ? "FILESYSTEM" : "DATABASE";
setWorkflowList([]);
fetchWorkflows(connectorType).then((data) => {
if (connectorType === "DATABASE") {
fetchWorkflows("MANUALREVIEW").then((manualReviewData) => {
const combinedData = [...data, ...manualReviewData];
setWorkflowList(combinedData);
});
} else {
setWorkflowList(data);
}
});
};

useEffect(() => {
34 changes: 2 additions & 32 deletions prompt-service/src/unstract/prompt_service/main.py
@@ -129,31 +129,6 @@ def construct_prompt(
return prompt


def construct_prompt_for_engine(
    preamble: str,
    prompt: str,
    postamble: str,
    grammar_list: list[dict[str, Any]],
) -> str:
    # Let's cleanup the context. Remove if 3 consecutive newlines are found
    prompt = f"{preamble}\n\nQuestion or Instruction: {prompt}\n"
    if grammar_list is not None and len(grammar_list) > 0:
        prompt += "\n"
        for grammar in grammar_list:
            word = ""
            synonyms = []
            if PSKeys.WORD in grammar:
                word = grammar[PSKeys.WORD]
            if PSKeys.SYNONYMS in grammar:
                synonyms = grammar[PSKeys.SYNONYMS]
            if len(synonyms) > 0 and word != "":
                prompt += f'\nNote: You can consider that the word {word} is same as \
{", ".join(synonyms)} in both the quesiton and the context.'  # noqa
    prompt += f"\n\n{postamble}"
    prompt += "\n\n"
    return prompt


def authentication_middleware(func: Any) -> Any:
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        token = AuthenticationMiddleware.get_token_from_auth_header(request)
@@ -703,13 +678,8 @@ def run_retrieval( # type:ignore
vector_index,
retrieval_type: str,
) -> tuple[str, str]:
prompt = construct_prompt_for_engine(
preamble=tool_settings.get(PSKeys.PREAMBLE, ""),
prompt=output[PSKeys.PROMPTX],
postamble=tool_settings.get(PSKeys.POSTAMBLE, ""),
grammar_list=tool_settings.get(PSKeys.GRAMMAR, []),
)
if retrieval_type is PSKeys.SUBQUESTION:
prompt = output[PSKeys.PROMPTX]
if retrieval_type == PSKeys.SUBQUESTION:
subq_prompt = (
f"Generate a sub-question from the following verbose prompt that will"
f" help extract relevant documents from a vector store:\n\n{prompt}"
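One detail worth noting in the run_retrieval hunk above: the removed comparison used "is" (object identity) while the replacement uses "==" (value equality), which is the correct way to compare strings. A short standalone illustration, independent of PSKeys:

# Identity vs. equality for strings (standalone illustration).
SUBQUESTION = "subquestion"
prefix = "sub"
retrieval_type = prefix + "question"  # built at runtime, a distinct str object

print(retrieval_type == SUBQUESTION)  # True  -- compares values
print(retrieval_type is SUBQUESTION)  # usually False -- compares object identity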
