88 examples
Inconsistent state
Application or component in an unexpected state.
[ FAQ1 ]
What is inconsistent state?
Inconsistent state occurs when the visual or logical state of an application diverges from the actual data or conditions it should represent. It typically results from improper state management, asynchronous logic errors, or unresolved race conditions, all of which are especially common in frontend frameworks such as React. Users may then see interfaces that display outdated, incorrect, or conflicting information, which causes confusion and makes application behavior unreliable. Persistent inconsistencies degrade the user experience and complicate debugging and maintenance.
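For instance, the following minimal React sketch (a hypothetical SearchResults component and /api/search endpoint, not taken from any real codebase) shows how an unguarded asynchronous effect can leave the UI displaying results that no longer match the current query:

```tsx
import { useEffect, useState } from "react";

// Hypothetical search box: if the user types "ab" and then "abc", the "ab"
// response may resolve last and overwrite the newer "abc" results, so the UI
// shows data that no longer matches the query the user actually entered.
function SearchResults({ query }: { query: string }) {
  const [results, setResults] = useState<string[]>([]);

  useEffect(() => {
    fetch(`/api/search?q=${encodeURIComponent(query)}`)
      .then((res) => res.json())
      .then((data: string[]) => {
        // Nothing guarantees this response belongs to the latest query.
        setResults(data);
      });
  }, [query]);

  return (
    <ul>
      {results.map((r) => (
        <li key={r}>{r}</li>
      ))}
    </ul>
  );
}

export default SearchResults;
```

When the response for an older query arrives after the response for the newer one, the stale data wins, which is exactly the kind of divergence described above.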
[ FAQ2 ]
How to fix inconsistent state
To fix inconsistent state, use robust state-management practices and clearly structured data flows, especially in frameworks such as React. Rely on established state-management libraries (e.g., Redux or Zustand) to keep UI state and data state synchronized. Manage asynchronous operations explicitly, using promises, async/await, or React's useEffect hook carefully, so that race conditions cannot leave stale updates behind. Regularly test application logic and UI flows with unit and integration tests to identify and resolve state inconsistencies early in development.
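As a concrete illustration (a minimal sketch using the same hypothetical /api/search endpoint as above, not a prescribed implementation), the race condition can be closed by ignoring or aborting responses that belong to an effect that has already been cleaned up:

```tsx
import { useEffect, useState } from "react";

// Same hypothetical search box, with the effect cleaned up so a stale response
// can no longer overwrite the results for the latest query.
function SearchResults({ query }: { query: string }) {
  const [results, setResults] = useState<string[]>([]);

  useEffect(() => {
    let cancelled = false;
    const controller = new AbortController();

    fetch(`/api/search?q=${encodeURIComponent(query)}`, { signal: controller.signal })
      .then((res) => res.json())
      .then((data: string[]) => {
        // Only apply the response if this effect is still the latest one.
        if (!cancelled) setResults(data);
      })
      .catch(() => {
        // Aborted or failed requests are ignored here; real code would surface errors.
      });

    return () => {
      cancelled = true;
      controller.abort();
    };
  }, [query]);

  return (
    <ul>
      {results.map((r) => (
        <li key={r}>{r}</li>
      ))}
    </ul>
  );
}

export default SearchResults;
```

The cleanup function runs before the effect re-runs for a new query and again on unmount, so only the latest request can ever reach setResults.
diff block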
+// Copyright 2025 OpenObserve Inc.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::future::Future;
+
+use anyhow::Context;
+use config::meta::ratelimit::RatelimitRule;
+use infra::table::ratelimit::RuleEntry;
+use o2_enterprise::enterprise::super_cluster::queue::ratelimit::{
+ SUPER_CLUSTER_RATELIMIT_KEY_PREFIX, ratelimit_rule_delete, ratelimit_rule_put,
+ ratelimit_rule_update,
+};
+
+#[derive(Debug, thiserror::Error)]
+pub enum RatelimitError {
+ #[error("ratelimit rule with ID {0} not found.")]
+ NotFound(String),
+ #[error("ratelimit rule entry type not supported: {0}")]
+ NotSupportRuleEntry(String),
+ #[error(transparent)]
+ DbError(#[from] anyhow::Error),
+}
+
+#[derive(Debug)]
+enum RuleOperation {
+ Save,
+ Update,
+ Delete,
+}
+
+impl RuleOperation {
+ fn as_str(&self) -> &'static str {
+ match self {
+ RuleOperation::Save => "add",
+ RuleOperation::Update => "update",
+ RuleOperation::Delete => "delete",
+ }
+ }
+
+ async fn execute_cluster_operation(
+ &self,
+ key: String,
+ value: bytes::Bytes,
+ ) -> Result<(), anyhow::Error> {
+ match self {
+ RuleOperation::Save => ratelimit_rule_put(key, value).await,
+ RuleOperation::Update => ratelimit_rule_update(key, value).await,
+ RuleOperation::Delete => ratelimit_rule_delete(key, value).await,
+ }
+ }
+}
+
+async fn handle_rule_operation(
+ rule: RuleEntry,
+ operation: RuleOperation,
+ db_operation: impl Future<Output = Result<(), anyhow::Error>>,
+) -> Result<(), RatelimitError> {
+ let db_result = db_operation.await.map_err(RatelimitError::DbError);
+
+ #[cfg(feature = "enterprise")]
+ if o2_enterprise::enterprise::common::infra::config::get_config()
+ .super_cluster
+ .enabled
+ {
+ if let Err(e) = sync_to_super_cluster(&rule, &operation).await {
+ log::error!(
+ "[Ratelimit] error triggering super cluster event to {} rule: {}",
+ operation.as_str(),
+ e
+ );
+ }
+ }
+
+ db_result
greptile
logic: Database operation error is mapped before super cluster sync attempt. If db_operation fails, the error is returned immediately, potentially leaving the super cluster in an inconsistent state.
diff block
+import asyncio
+import os
+from typing import Optional
+from dotenv import load_dotenv
+from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_exception_type
+import httpx
+from qdrant_client.http.exceptions import ResponseHandlingException
+
+from qdrant_wrapper.qdrant_base import QdrantBase, DocumentStatus
+
+# Load environment variables
+load_dotenv()
+
+class QdrantRechunk(QdrantBase):
+ """Class for rechunking and purging operations in Qdrant."""
+
+ def __init__(self):
+ super().__init__()
+
+ @retry(
+ stop=stop_after_attempt(5),
+ wait=wait_fixed(15),
+ retry=retry_if_exception_type((httpx.ConnectTimeout, ResponseHandlingException))
+ )
+ async def purge_collection(self, document_id: str) -> bool:
+ """
+ Purge a collection from Qdrant.
+
+ Args:
+ document_id: The ID of the document/collection to purge
+
+ Returns:
+ bool: True if the collection was successfully purged, False otherwise
+ """
+ try:
+ # Delete the collection
+ await self.async_client.delete_collection(collection_name=document_id)
+ print(f"Successfully purged collection {document_id}")
+ return True
+
+ except Exception as e:
+ print(f"Error purging collection {document_id}: {e}")
+ return False
+
+ @retry(
+ stop=stop_after_attempt(5),
+ wait=wait_fixed(15),
+ retry=retry_if_exception_type((httpx.ConnectTimeout, ResponseHandlingException))
+ )
+ async def clear_collection_points(self, document_id: str) -> bool:
+ """
+ Clear all points from a collection without deleting the collection itself.
+
+ Args:
+ document_id: The ID of the document/collection to clear
+
+ Returns:
+ bool: True if the points were successfully cleared, False otherwise
+ """
+ try:
+ # Check if collection exists first
+ status = await self.document_exists_and_is_indexed(document_id)
+
+ if status == DocumentStatus.NOT_EXISTS:
+ print(f"Collection {document_id} does not exist. Nothing to clear.")
+ return True
+
+ if status == DocumentStatus.EMPTY:
+ print(f"Collection {document_id} is already empty.")
+ return True
+
+ # Delete all points in the collection
+ await self.async_client.delete(
+ collection_name=document_id,
+ points_selector=None # None means delete all points
+ )
+ print(f"Successfully cleared all points from collection {document_id}")
+ return True
+
+ except Exception as e:
+ print(f"Error clearing points from collection {document_id}: {e}")
+ return False
+
+ @retry(
+ stop=stop_after_attempt(5),
+ wait=wait_fixed(15),
+ retry=retry_if_exception_type((httpx.ConnectTimeout, ResponseHandlingException))
+ )
+ async def purge_all_collections(self) -> bool:
+ """
+ Purge all collections from Qdrant.
+
+ Returns:
+ bool: True if all collections were successfully purged, False if any failed
+ """
+ try:
+ # Get list of all collections
+ collections_response = await self.async_client.get_collections()
+ collections = [collection.name for collection in collections_response.collections]
+
+ if not collections:
+ print("No collections found to purge.")
+ return True
+
+ print(f"Found {len(collections)} collections to purge: {collections}")
+
+ # Purge each collection
+ results = []
+ for collection_name in collections:
+ result = await self.purge_collection(collection_name)
+ results.append(result)
+
+ # Return True only if all purges were successful
+ return all(results)
+
+ except Exception as e:
+ print(f"Error purging all collections: {e}")
+ return False
+
+ @retry(
+ stop=stop_after_attempt(5),
+ wait=wait_fixed(15),
+ retry=retry_if_exception_type((httpx.ConnectTimeout, ResponseHandlingException))
+ )
+ async def clear_all_collection_points(self) -> bool:
+ """
+ Clear all points from all collections without deleting the collections themselves.
+
+ Returns:
+ bool: True if all collections were successfully cleared, False if any failed
+ """
+ try:
+ # Get list of all collections
+ collections_response = await self.async_client.get_collections()
+ collections = [collection.name for collection in collections_response.collections]
+
+ if not collections:
+ print("No collections found to clear.")
+ return True
+
+ print(f"Found {len(collections)} collections to clear: {collections}")
+
+ # Clear each collection
+ results = []
+ for collection_name in collections:
+ result = await self.clear_collection_points(collection_name)
+ results.append(result)
+
+ # Return True only if all clears were successful
+ return all(results)
+
+ except Exception as e:
+ print(f"Error clearing all collections: {e}")
+ return False
+
+ @retry(
+ stop=stop_after_attempt(5),
+ wait=wait_fixed(15),
+ retry=retry_if_exception_type((httpx.ConnectTimeout, ResponseHandlingException))
+ )
+ async def repopulate_collections(self, counties: list[dict]) -> dict:
+ """
+ Repopulate Qdrant collections for a list of counties.
+
+ Args:
+ counties: List of dictionaries with municipality, state_code, and zone_code
+ Example: [{"municipality": "ALBANY", "state_code": "IN", "zone_code": "R-R"}]
+
+ Returns:
+ dict: Results of the repopulation process with county names as keys and success status as values
+ """
+ from agent_graphs.extractor_graph import extractor_graph
+
+ results = {}
+
+ for county in counties:
+ try:
+ municipality = county.get("municipality")
+ state_code = county.get("state_code")
+ zone_code = county.get("zone_code", "")
+
+ if not municipality or not state_code:
+ print(f"Skipping invalid county data: {county}")
+ results[f"{municipality}_{state_code}"] = False
+ continue
+
+ print(f"Processing {municipality}, {state_code}")
+
+ # Create initial state for the extractor graph
+ initial_state = {
+ "document_id": "",
+ "document_content": "",
+ "section_list": {},
+ "municipality": municipality,
+ "state_code": state_code,
+ "zone_code": zone_code
+ }
+
+ # Configuration for the extractor graph
+ config = {
+ "configurable": {
+ "model_name": "gpt-4o-mini",
+ "test_mode": False
+ }
+ }
+
+ # Run the extractor graph to fetch, parse, and chunk the document
+ final_state = await extractor_graph.ainvoke(initial_state, config)
+
+ document_id = final_state.get("document_id", "")
+ if document_id:
+ status = await self.document_exists_and_is_indexed(document_id)
+ if status in [DocumentStatus.INDEXED, DocumentStatus.UDC]:
+ print(f"Successfully repopulated collection for {municipality}, {state_code}")
+ results[document_id] = True
+ else:
+ print(f"Failed to repopulate collection for {municipality}, {state_code}")
+ results[document_id] = False
+ else:
+ print(f"Failed to get document_id for {municipality}, {state_code}")
+ results[f"{municipality}_{state_code}"] = False
+
+ except Exception as e:
+ print(f"Error repopulating collection for {county}: {e}")
+ county_id = f"{county.get('municipality', 'unknown')}_{county.get('state_code', 'unknown')}"
+ results[county_id] = False
+
+ return results
+
+ @retry(
+ stop=stop_after_attempt(5),
+ wait=wait_fixed(15),
+ retry=retry_if_exception_type((httpx.ConnectTimeout, ResponseHandlingException))
+ )
+ async def purge_and_repopulate(self, counties: list[dict]) -> dict:
+ """
+ Comprehensive method that purges all collections and then repopulates with the provided counties.
+
+ Args:
+ counties: List of dictionaries with municipality, state_code, and zone_code
+ Example: [{"municipality": "ALBANY", "state_code": "IN", "zone_code": "R-R"}]
+
+ Returns:
+ dict: {
+ "purge_success": bool,
+ "total_counties": int,
+ "successful_counties": int,
+ "failed_counties": int,
+ "county_results": dict # Detailed results for each county
+ }
+ """
+ result_summary = {
+ "purge_success": False,
+ "total_counties": len(counties),
+ "successful_counties": 0,
+ "failed_counties": 0,
+ "county_results": {}
+ }
+
+ # Step 1: Purge all existing collections
+ print("Step 1: Purging all existing collections...")
+ purge_success = await self.purge_all_collections()
+ result_summary["purge_success"] = purge_success
+
+ if not purge_success:
+ print("Warning: Failed to purge all collections. Continuing with repopulation...")
+
greptile
logic: Continuing after purge failure could leave the database in an inconsistent state. Consider requiring successful purge before repopulation.
diff block
// Auth auditing should be done by router also
#[cfg(feature = "enterprise")]
tokio::task::spawn(async move { self_reporting::run_audit_publish().await });
+ #[cfg(feature = "enterprise")]
+ {
+ tokio::task::spawn(async move { db::ofga::watch().await });
+ db::ofga::cache().await.expect("ofga model cache failed");
+ o2_openfga::authorizer::authz::init_open_fga().await;
+ // RBAC model
+ if get_openfga_config().enabled {
+ if let Err(e) = crate::common::infra::ofga::init().await {
+ log::error!("OFGA init failed: {}", e);
+ }
+ }
+ }
greptile
logic: The OpenFGA initialization block should handle the case where `init_open_fga().await` fails. Currently only `ofga::init()` has error handling, but a failure in `init_open_fga()` could leave the system in an inconsistent state.
diff block
}
async delete(serverlessFunction: ServerlessFunctionEntity) {
- const functionExists = await this.checkFunctionExists(
- serverlessFunction.id,
- );
+ const functionExists = await this.checkFunctionExists(serverlessFunction);
if (functionExists) {
const deleteFunctionCommand = new DeleteFunctionCommand({
- FunctionName: serverlessFunction.id,
+ FunctionName: this.getFunctionName(serverlessFunction),
});
await (await this.getLambdaClient()).send(deleteFunctionCommand);
}
}
- private getInMemoryServerlessFunctionFolderPath = (
- serverlessFunction: ServerlessFunctionEntity,
- version: string,
- ) => {
- return join(SERVERLESS_TMPDIR_FOLDER, serverlessFunction.id, version);
- };
+ private async build(serverlessFunction: ServerlessFunctionEntity) {
+ const functionExists = await this.checkFunctionExists(serverlessFunction);
- async build(serverlessFunction: ServerlessFunctionEntity, version: 'draft') {
- if (version !== 'draft') {
- throw new Error("We can only build 'draft' version with lambda driver");
+ if (functionExists) {
+ return;
}
greptile
logic: Early return without cleanup could leave resources in an inconsistent state if function exists
diff block
if (payload.hostname && customDomainRegistered) {
this.domainManagerService
.deleteCustomHostnameByHostnameSilently(payload.hostname)
- .catch(() => {
- // send to sentry
+ .catch((err) => {
+ this.exceptionHandlerService.captureExceptions([err]);
});
}
greptile
logic: Error in rollback is silently captured but original error is still thrown. This could leave system in inconsistent state if rollback fails.
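A generic TypeScript sketch of the pattern the comment hints at (an illustrative withRollback helper, not code from this project): if the rollback itself fails, propagate both errors instead of only capturing the rollback failure:

```ts
// Illustrative helper (not from the project): run an action and, on failure,
// attempt a rollback. If the rollback also fails, propagate both errors so the
// caller knows the system may now be in an inconsistent state.
async function withRollback<T>(
  action: () => Promise<T>,
  rollback: () => Promise<void>,
): Promise<T> {
  try {
    return await action();
  } catch (originalError) {
    try {
      await rollback();
    } catch (rollbackError) {
      throw new AggregateError(
        [originalError, rollbackError],
        "action failed and its rollback also failed",
      );
    }
    // Rollback succeeded; rethrow the original failure.
    throw originalError;
  }
}
```

Callers then see explicitly both that the action failed and that the compensating action could not restore the previous state.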
diff block
use serde::{Deserialize, Serialize};
use strum::{Display, EnumString};
-/// Minimal team information
-#[derive(Debug, Default, Serialize, Deserialize, PartialEq)]
-pub struct Response {
- /// Team ID
- pub id: String,
-
- /// Name used for display purposes
- pub display_name: String,
-
- /// Is this user an admin of the team
- pub is_admin: bool,
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[typeshare::typeshare]
+pub struct TeamListResponse {
+ pub teams: Vec<TeamResponse>,
}
-/// Member of a team
-#[derive(Debug, Serialize, Deserialize, PartialEq)]
-pub struct MemberResponse {
- /// User ID
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[typeshare::typeshare]
+pub struct TeamResponse {
pub id: String,
+ /// Display name
+ pub name: String,
+ /// Membership info of the calling user
+ pub membership: TeamMembership,
+}
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[typeshare::typeshare]
+pub struct TeamMembersResponse {
+ pub members: Vec<TeamMembership>,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[typeshare::typeshare]
+pub struct TeamMembership {
+ pub user_id: String,
/// Role of the user in the team
- pub role: MemberRole,
+ pub role: TeamRole,
}
-/// Role of a user in a team
-#[derive(Debug, Serialize, Deserialize, PartialEq, Display, EnumString)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Display, EnumString)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
-pub enum MemberRole {
+#[typeshare::typeshare]
+pub enum TeamRole {
+ Owner,
Admin,
Member,
}
+
+/// Provide user id to add user.
+/// Provide email address to invite user via email.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[typeshare::typeshare]
+pub struct AddTeamMemberRequest {
+ pub user_id: Option<String>,
+ pub email: Option<String>,
+ /// Role of the user in the team
+ pub role: Option<TeamRole>,
greptile
style: Consider making `role` field required since `TeamMembership` requires a role. Optional roles could lead to inconsistent states
diff block
</StyledHeaderIconContainer>
<StyledHeaderInfo>
<StyledHeaderTitle>
- <TextInput
+ <TitleInput
disabled={disabled}
+ sizeVariant="md"
value={title}
- copyButton={false}
- hotkeyScope="workflow-step-title"
- onEnter={onTitleChange}
- onEscape={onTitleChange}
onChange={handleChange}
- shouldTrim={false}
+ placeholder={headerType}
+ hotkeyScope="workflow-step-title"
+ onEnter={submitTitle}
+ onEscape={() => {
+ setTitle(initialTitle);
+ }}
greptile
logic: onEscape resets title but doesn't update command menu page info, creating inconsistent state
suggested fix
onEscape={() => {
setTitle(initialTitle);
+ updateCommandMenuPageInfo({
+ pageTitle: initialTitle,
+ pageIcon: Icon,
+ });
}}
diff block
+import { mergeAttributes, Node } from '@tiptap/core'
+import { Fragment, Node as PMNode, Slice } from 'prosemirror-model'
+import { EditorState, NodeSelection, Plugin, PluginKey, TextSelection } from 'prosemirror-state'
+import { Decoration, DecorationSet } from 'prosemirror-view'
+import { blockToNode, inlineContentToNodes } from '../../../api/nodeConversions/nodeConversions'
+
+import { BlockChildrenType } from '../api/blockTypes'
+import { ResolvedPos } from '@tiptap/pm/model'
+import { EditorView } from '@tiptap/pm/view'
+import { mergeCSSClasses } from '../../../shared/utils'
+import { BlockNoteDOMAttributes, BlockSchema, PartialBlock } from '../api/blockTypes'
+import { getBlockInfoFromPos } from '../helpers/getBlockInfoFromPos'
+import { getGroupInfoFromPos } from '../helpers/getGroupInfoFromPos'
+import styles from './Block.module.css'
+import BlockAttributes from './BlockAttributes'
+
+const SelectionPluginKey = new PluginKey('selectionPluginKey')
+const ClickSelectionPluginKey = new PluginKey('clickSelectionPluginKey')
+const PastePluginKey = new PluginKey('pastePluginKey')
+const headingLinePluginKey = new PluginKey('HeadingLinePlugin')
+
+const SelectionPlugin = new Plugin({
+ key: SelectionPluginKey,
+ state: {
+ init() {
+ return DecorationSet.empty
+ },
+ apply(tr, oldState) {
+ return tr.getMeta(SelectionPluginKey) || oldState
+ },
+ },
+ props: {
+ decorations(state) {
+ return this.getState(state)
+ },
+ },
+})
+
+const ClickSelectionPlugin = new Plugin({
+ key: ClickSelectionPluginKey,
+ props: {
+ handleDOMEvents: {
+ mousedown(view, event) {
+ if (event.shiftKey && event.button === 0) {
+ const { state } = view
+ const editorBoundingBox = (view.dom.firstChild! as HTMLElement).getBoundingClientRect()
+ const coords = {
+ left: editorBoundingBox.left + editorBoundingBox.width / 2, // take middle of editor
+ top: event.clientY,
+ }
+ let pos = view.posAtCoords(coords)
+ if (!pos) {
+ return undefined
+ }
+ const { selection } = state
+ const selectedPos = state.doc.resolve(selection.from)
+ const nodePos = state.doc.resolve(pos.pos)
+ if (selectedPos.start() === selection.from && pos.pos === nodePos.end()) {
+ const decoration = Decoration.widget(nodePos.pos, () => {
+ const span = document.createElement('span')
+ span.style.backgroundColor = 'blue'
+ span.style.width = '10px'
+ span.style.height = '10px'
+ return span
+ })
+ const decorationSet = DecorationSet.create(state.doc, [decoration])
+ view.dispatch(state.tr.setMeta(SelectionPluginKey, decorationSet))
+ }
+ return false
+ }
+ return false
+ },
+ },
+ },
+})
+
+const PastePlugin = new Plugin({
+ key: PastePluginKey,
+ props: {
+ handlePaste: (view, event) => {
+ if (!event.clipboardData) {
+ return false
+ }
+
+ const { state } = view
+ let { tr } = state
+ const { selection } = state
+ const { $from, $to } = selection
+
+ const targetNode = state.doc.resolve($from.pos).parent
+
+ if (targetNode.type.name === 'image') {
+ tr = tr.insertText(event.clipboardData.getData('text/plain'), $from.pos, $to.pos)
+ view.dispatch(tr)
+ return true
+ }
+
+ return false
+ },
+ },
+})
+
+const headingLinePlugin = new Plugin({
+ key: headingLinePluginKey,
+ view(editorView) {
+ return new HeadingLinePlugin(editorView)
+ },
+})
+
+class HeadingLinePlugin {
+ private line: HTMLElement
+ constructor(view: EditorView) {
+ this.line = document.createElement('div')
+ this.line.style.transition = 'all 0.15s ease-in-out'
+ this.line.style.pointerEvents = 'none'
+ this.line.style.display = ''
+ this.line.style.opacity = '0'
+ view.dom.parentNode?.appendChild(this.line)
+
+ this.update(view, null)
+ }
+
+ update(view: EditorView, lastState: EditorState | null) {
+ let state = view.state
+ // Don't do anything if the document/selection didn't change
+ if (lastState && lastState.doc.eq(state.doc) && lastState.selection.eq(state.selection)) return
+
+ let res = getNearestHeadingFromPos(state, state.selection.from)
+
+ if (res && res.heading?.type.name === 'heading') {
+ let { node } = view.domAtPos(res.groupStartPos)
+
+ let rect = (node as HTMLElement).getBoundingClientRect()
+ let editorRect = view.dom.getBoundingClientRect()
+ let groupPadding = 10
+ let editorPaddingTop = 40
+ this.line.style.position = 'absolute'
+ this.line.style.top = `${rect.top + editorPaddingTop + groupPadding - editorRect.top}px`
+ this.line.style.left = `${rect.left - editorRect.left + groupPadding}px`
+ this.line.style.width = `2.5px`
+ this.line.style.height = `${rect.height - groupPadding * 2}px`
+ this.line.style.backgroundColor = 'var(--brand5)'
+ this.line.style.opacity = '0.4'
+ } else {
+ this.line.style.opacity = '0'
+ return
+ }
+ }
+
+ destroy() {
+ this.line.remove()
+ }
+}
+
+function getNearestHeadingFromPos(state: EditorState, pos: number) {
+ const $pos = state.doc.resolve(pos)
+ const maxDepth = $pos.depth
+ let group = $pos.node(maxDepth)
+ let heading = group.firstChild
+ let depth = maxDepth
+
+ if (maxDepth > 3) {
+ while (true) {
+ if (depth < 0) {
+ break
+ }
+
+ if (group.type.name === 'blockContainer' && heading?.type.name === 'heading') {
+ break
+ }
+
+ depth -= 1
+ group = $pos.node(depth)
+ heading = group.firstChild
+ }
+ return {
+ depth,
+ groupStartPos: $pos.start(depth),
+ heading,
+ group,
+ $pos,
+ }
+ }
+
+ return
+}
+
+export function getParentBlockFromPos(state: EditorState, pos: number) {
+ const $pos = state.doc.resolve(pos)
+ const depth = $pos.depth
+
+ // if (depth > 3 && container.type.name == 'blockContainer') {
+ if (depth > 3) {
+ let parent = $pos.node(depth - 3)
+ let parentGroup = $pos.node(depth - 2)
+ let parentPos = $pos.start(depth - 3)
+ return {
+ parentGroup,
+ parentBlock: parent.firstChild,
+ parentPos,
+ depth,
+ $pos,
+ }
+ }
+
+ return
+}
+declare module '@tiptap/core' {
+ interface Commands<ReturnType> {
+ block: {
+ BNCreateBlock: (pos: number) => ReturnType
+ BNDeleteBlock: (posInBlock: number) => ReturnType
+ BNMergeBlocks: (posBetweenBlocks: number) => ReturnType
+ BNSplitBlock: (posInBlock: number, keepType: boolean) => ReturnType
+ BNSplitHeadingBlock: (posInBlock: number) => ReturnType
+ BNUpdateBlock: <BSchema extends BlockSchema>(posInBlock: number, block: PartialBlock<BSchema>) => ReturnType
+ BNCreateOrUpdateBlock: <BSchema extends BlockSchema>(
+ posInBlock: number,
+ block: PartialBlock<BSchema>,
+ ) => ReturnType
+ UpdateGroupChildren: (
+ group: PMNode,
+ groupPos: ResolvedPos,
+ groupLevel: number,
+ listType: BlockChildrenType,
+ indent: number,
+ ) => ReturnType
+ UpdateGroup: (
+ posInBlock: number,
+ listType: BlockChildrenType,
+ tab: boolean,
+ // start?: string,
+ isSank?: boolean,
+ turnInto?: boolean,
+ ) => ReturnType
+ }
+ }
+}
+
+/**
+ * The main "Block node" documents consist of
+ */
+export const BlockContainer = Node.create<{
+ domAttributes?: BlockNoteDOMAttributes
+}>({
+ name: 'blockContainer',
+ group: 'blockContainer',
+ // A block always contains content, and optionally a blockGroup which contains nested blocks
+ content: 'blockContent blockGroup?',
+ // Ensures content-specific keyboard handlers trigger first.
+ priority: 50,
+ defining: true,
+
+ parseHTML() {
+ return [
+ {
+ tag: 'div',
+ getAttrs: (element) => {
+ if (typeof element === 'string') {
+ return false
+ }
+
+ const attrs: Record<string, string> = {}
+ for (const [nodeAttr, HTMLAttr] of Object.entries(BlockAttributes)) {
+ if (element.getAttribute(HTMLAttr)) {
+ attrs[nodeAttr] = element.getAttribute(HTMLAttr)!
+ }
+ }
+
+ if (element.getAttribute('data-node-type') === 'blockContainer') {
+ return attrs
+ }
+
+ return false
+ },
+ },
+ ]
+ },
+
+ renderHTML({ HTMLAttributes }) {
+ const domAttributes = this.options.domAttributes?.blockContainer || {}
+
+ return [
+ 'div',
+ mergeAttributes(HTMLAttributes, {
+ class: styles.blockOuter,
+ 'data-node-type': 'block-outer',
+ }),
+ [
+ 'div',
+ mergeAttributes(
+ {
+ ...domAttributes,
+ class: mergeCSSClasses(styles.block, domAttributes.class),
+ 'data-node-type': this.name,
+ },
+ HTMLAttributes,
+ ),
+ 0,
+ ],
+ ]
+ },
+
+ addCommands() {
+ return {
+ // Creates a new text block at a given position.
+ BNCreateBlock:
+ (pos) =>
+ ({ state, dispatch }) => {
+ const newBlock = state.schema.nodes['blockContainer'].createAndFill()!
+
+ if (dispatch) {
+ state.tr.insert(pos, newBlock)
+ }
+
+ return true
+ },
+ // Deletes a block at a given position.
+ BNDeleteBlock:
+ (posInBlock) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { startPos, endPos } = blockInfo
+
+ if (dispatch) {
+ state.tr.deleteRange(startPos, endPos)
+ }
+
+ return true
+ },
+ // Updates a block at a given position.
+ BNUpdateBlock:
+ (posInBlock, block) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { startPos, endPos, node, contentNode } = blockInfo
+
+ if (dispatch) {
+ // Adds blockGroup node with child blocks if necessary.
+ if (block.children !== undefined && block.children.length > 0) {
+ const childNodes = []
+
+ // Creates ProseMirror nodes for each child block, including their descendants.
+ for (const child of block.children) {
+ childNodes.push(blockToNode(child, state.schema))
+ }
+
+ // Checks if a blockGroup node already exists.
+ if (node.childCount === 2) {
+ // Replaces all child nodes in the existing blockGroup with the ones created earlier.
+ state.tr.replace(
+ startPos + contentNode.nodeSize + 1,
+ endPos - 1,
+ new Slice(Fragment.from(childNodes), 0, 0),
+ )
+ } else {
+ // Inserts a new blockGroup containing the child nodes created earlier.
+ state.tr.insert(
+ startPos + contentNode.nodeSize,
+ state.schema.nodes['blockGroup'].create({}, childNodes),
+ )
+ }
+ }
+
+ // Replaces the blockContent node's content if necessary.
+ if (block.content !== undefined) {
+ let content: PMNode[] = []
+
+ // Checks if the provided content is a string or InlineContent[] type.
+ if (typeof block.content === 'string') {
+ // Adds a single text node with no marks to the content.
+ content.push(state.schema.text(block.content))
+ } else {
+ // Adds a text node with the provided styles converted into marks to the content, for each InlineContent
+ // object.
+ content = inlineContentToNodes(block.content, state.schema)
+ }
+
+ // Replaces the contents of the blockContent node with the previously created text node(s).
+ state.tr.replace(
+ startPos + 1,
+ startPos + contentNode.nodeSize - 1,
+ new Slice(Fragment.from(content), 0, 0),
+ )
+ }
+
+ // Changes the blockContent node type and adds the provided props as attributes. Also preserves all existing
+ // attributes that are compatible with the new type.
+ state.tr.setNodeMarkup(startPos, block.type === undefined ? undefined : state.schema.nodes[block.type], {
+ ...contentNode.attrs,
+ ...block.props,
+ })
+
+ // Adds all provided props as attributes to the parent blockContainer node too, and also preserves existing
+ // attributes.
+ let providedProps = {
+ ...node.attrs,
+ ...block.props,
+ }
+ state.tr.setNodeMarkup(startPos - 1, undefined, providedProps)
+ }
+
+ return true
+ },
+ // Appends the text contents of a block to the nearest previous block, given a position between them. Children of
+ // the merged block are moved out of it first, rather than also being merged.
+ //
+ // In the example below, the position passed into the function is between Block1 and Block2.
+ //
+ // Block1
+ // Block2
+ // Block3
+ // Block4
+ // Block5
+ //
+ // Becomes:
+ //
+ // Block1
+ // Block2Block3
+ // Block4
+ // Block5
+ BNMergeBlocks:
+ (posBetweenBlocks) =>
+ ({ state, dispatch }) => {
+ const nextNodeIsBlock = state.doc.resolve(posBetweenBlocks + 1).node().type.name === 'blockContainer'
+ const prevNodeIsBlock = state.doc.resolve(posBetweenBlocks - 1).node().type.name === 'blockContainer'
+
+ if (!nextNodeIsBlock || !prevNodeIsBlock) {
+ return false
+ }
+
+ const nextBlockInfo = getBlockInfoFromPos(state.doc, posBetweenBlocks + 1)
+
+ const { node, contentNode, startPos, endPos, depth } = nextBlockInfo!
+
+ // Removes a level of nesting all children of the next block by 1 level, if it contains both content and block
+ // group nodes.
+ if (node.childCount === 2) {
+ const childBlocksStart = state.doc.resolve(startPos + contentNode.nodeSize + 1)
+ const childBlocksEnd = state.doc.resolve(endPos - 1)
+ const childBlocksRange = childBlocksStart.blockRange(childBlocksEnd)
+
+ // Moves the block group node inside the block into the block group node that the current block is in.
+ if (dispatch) {
+ state.tr.lift(childBlocksRange!, depth - 1)
+ }
+ }
+
+ let prevBlockEndPos = posBetweenBlocks - 1
+ let prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ // Finds the nearest previous block, regardless of nesting level.
+ while (prevBlockInfo!.numChildBlocks > 0) {
+ prevBlockEndPos--
+ prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+ if (prevBlockInfo === undefined) {
+ return false
+ }
+ }
+
+ // Deletes next block and adds its text content to the nearest previous block.
+
+ if (dispatch) {
+ dispatch(
+ state.tr
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ .replace(prevBlockEndPos - 1, startPos, new Slice(contentNode.content, 0, 0))
+ .scrollIntoView(),
+ )
+
+ state.tr.setSelection(new TextSelection(state.doc.resolve(prevBlockEndPos - 1)))
+ }
+
+ return true
+ },
+ // Splits a block at a given position. Content after the position is moved to a new block below, at the same
+ // nesting level.
+ BNSplitBlock:
+ (posInBlock, keepType) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { contentNode, contentType, startPos, endPos, depth } = blockInfo
+
+ const originalBlockContent = state.doc.cut(startPos + 1, posInBlock)
+ const newBlockContent = state.doc.cut(posInBlock, endPos - 1)
+
+ const newBlock = state.schema.nodes['blockContainer'].createAndFill()!
+
+ const newBlockInsertionPos = endPos + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ if (dispatch) {
+ // Creates a new block. Since the schema requires it to have a content node, a paragraph node is created
+ // automatically, spanning newBlockContentPos to newBlockContentPos + 1.
+ state.tr.insert(newBlockInsertionPos, newBlock)
+
+ // Replaces the content of the newly created block's content node. Doesn't replace the whole content node so
+ // its type doesn't change.
+ state.tr.replace(
+ newBlockContentPos,
+ newBlockContentPos + 1,
+ newBlockContent.content.size > 0
+ ? new Slice(Fragment.from(newBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+
+ // Changes the type of the content node. The range doesn't matter as long as both from and to positions are
+ // within the content node.
+ if (keepType) {
+ state.tr.setBlockType(
+ newBlockContentPos,
+ newBlockContentPos,
+ state.schema.node(contentType).type,
+ contentNode.attrs,
+ )
+ }
+
+ // Sets the selection to the start of the new block's content node.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(newBlockContentPos)))
+
+ // Replaces the content of the original block's content node. Doesn't replace the whole content node so its
+ // type doesn't change.
+ state.tr.replace(
+ startPos + 1,
+ endPos - 1,
+ originalBlockContent.content.size > 0
+ ? new Slice(Fragment.from(originalBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+ }
+
+ return true
+ },
+ // Splits a block at a given position. Content after the position is moved to a new block below, at the same
+ // nesting level.
+ BNSplitHeadingBlock:
+ (posInBlock) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+ let { node, startPos, contentNode, depth } = blockInfo
+ if (node.childCount === 1) {
+ setTimeout(() => {
+ this.editor
+ .chain()
+ .deleteSelection()
+ .BNSplitBlock(state.selection.from, false)
+ .sinkListItem('blockContainer')
+ .UpdateGroup(-1, blockInfo.node.attrs.listType, true)
+ .run()
+ })
greptile
logic: Using setTimeout with editor commands can cause race conditions and inconsistent state. Consider using a synchronous approach or proper state management.
diff block
+import type { DialogContentProps } from "@/hooks/use-dialog";
+import {
+ Button,
+ DialogFooter,
+ DialogHeader,
+ DialogTitle,
+} from "@rivet-gg/components";
+import * as GoToActorForm from "@/domains/project/forms/go-to-actor-form";
+import { useNavigate } from "@tanstack/react-router";
+
+interface ContentProps extends DialogContentProps {
+ projectNameId: string;
+ environmentNameId: string;
+}
+
+export default function GoToActorDialogContent({
+ projectNameId,
+ environmentNameId,
+ onClose,
+}: ContentProps) {
+ const navigate = useNavigate();
+ return (
+ <GoToActorForm.Form
+ defaultValues={{ actorId: "" }}
+ onSubmit={({ actorId }) => {
+ navigate({
+ to: "/projects/$projectNameId/environments/$environmentNameId/actors",
+ params: {
+ projectNameId,
+ environmentNameId,
+ },
+ search: {
+ actorId,
+ modal: undefined,
+ },
+ });
+ }}
greptile
logic: onClose not called after successful navigation, could leave dialog in inconsistent state if navigation fails
suggested fix
onSubmit={({ actorId }) => {
navigate({
to: "/projects/$projectNameId/environments/$environmentNameId/actors",
params: {
projectNameId,
environmentNameId,
},
search: {
actorId,
modal: undefined,
},
});
+ onClose();
}}
diff block
try {
await api.delete(`/workflows/${workflowId}`);
showSuccessToast("Workflow deleted successfully");
- refreshWorkflows();
+ revalidateWorkflow(workflowId);
+ return true;
greptile
logic: revalidateWorkflow is called before checking if the deletion was successful, which could lead to inconsistent state if the revalidation happens but the deletion failed
```suggestion
+ await api.delete(`/workflows/${workflowId}`);
+ showSuccessToast("Workflow deleted successfully");
revalidateWorkflow(workflowId);
return true;
```
diff block
throw new AppError('MAINOPENEXT', error);
}
});
+
+ ipcMain.on('get-app-path', () => app.getAppPath());
+
+ ipcMain.handle('app_setting', () => LocalStore.getApplicationConfig());
+
+ ipcMain.handle('AUTH_SUCCESS', async (_, arg) => {
+ try {
+ const user = new User({ ...arg, ...arg.user });
+ user.remoteId = arg.userId;
+ user.organizationId = arg.organizationId;
+ if (user.employee) {
+ await userService.save(user.toObject());
+ }
+ } catch (error) {
+ console.log('Error on save user', error);
+ }
+ store.set({
+ auth: { ...arg, isLogout: false }
+ });
+ await closeLoginWindow();
+ });
greptile
style: No error handling if closeLoginWindow fails - could leave window in inconsistent state
suggested fix
ipcMain.handle('AUTH_SUCCESS', async (_, arg) => {
try {
const user = new User({ ...arg, ...arg.user });
user.remoteId = arg.userId;
user.organizationId = arg.organizationId;
if (user.employee) {
await userService.save(user.toObject());
}
store.set({
auth: { ...arg, isLogout: false }
});
await closeLoginWindow();
} catch (error) {
+ log.error('Auth success handler failed', error);
+ throw new AppError('AUTH_SUCCESS_ERROR', error);
}
});
diff block
-import type { Channel } from 'storybook/internal/channels';
-import {
- TESTING_MODULE_CANCEL_TEST_RUN_REQUEST,
- TESTING_MODULE_PROGRESS_REPORT,
- TESTING_MODULE_RUN_REQUEST,
- type TestingModuleCancelTestRunRequestPayload,
- type TestingModuleProgressReportPayload,
- type TestingModuleRunRequestPayload,
-} from 'storybook/internal/core-events';
+import type { TestResult, TestState } from 'vitest/dist/node.js';
+
import type { experimental_UniversalStore } from 'storybook/internal/core-server';
+import type {
+ StatusStoreByTypeId,
+ StatusValue,
+ TestProviderStoreById,
+} from 'storybook/internal/types';
-import { isEqual } from 'es-toolkit';
+import { throttle } from 'es-toolkit';
+import type { Report } from 'storybook/preview-api';
-import { type StoreState, TEST_PROVIDER_ID } from '../constants';
+import { STATUS_TYPE_ID_A11Y, STATUS_TYPE_ID_COMPONENT_TEST, storeOptions } from '../constants';
+import type { RunTrigger, StoreEvent, StoreState, TriggerRunEvent, VitestError } from '../types';
+import { errorToErrorLike } from '../utils';
import { VitestManager } from './vitest-manager';
+export type TestManagerOptions = {
+ store: experimental_UniversalStore<StoreState, StoreEvent>;
+ componentTestStatusStore: StatusStoreByTypeId;
+ a11yStatusStore: StatusStoreByTypeId;
+ testProviderStore: TestProviderStoreById;
+ onError?: (message: string, error: Error) => void;
+ onReady?: () => void;
+};
+
+const testStateToStatusValueMap: Record<TestState | 'warning', StatusValue> = {
+ pending: 'status-value:pending',
+ passed: 'status-value:success',
+ warning: 'status-value:warning',
+ failed: 'status-value:error',
+ skipped: 'status-value:unknown',
+};
+
export class TestManager {
- vitestManager: VitestManager;
-
- selectedStoryCountForLastRun = 0;
-
- constructor(
- private channel: Channel,
- public store: experimental_UniversalStore<StoreState>,
- private options: {
- onError?: (message: string, error: Error) => void;
- onReady?: () => void;
- } = {}
- ) {
- this.vitestManager = new VitestManager(this);
+ public store: TestManagerOptions['store'];
- this.channel.on(TESTING_MODULE_RUN_REQUEST, this.handleRunRequest.bind(this));
- this.channel.on(TESTING_MODULE_CANCEL_TEST_RUN_REQUEST, this.handleCancelRequest.bind(this));
+ public vitestManager: VitestManager;
- this.store.onStateChange((state, previousState) => {
- if (!isEqual(state.config, previousState.config)) {
- this.handleConfigChange(state.config, previousState.config);
- }
- if (state.watching !== previousState.watching) {
- this.handleWatchModeRequest(state.watching);
- }
- });
+ private componentTestStatusStore: TestManagerOptions['componentTestStatusStore'];
- this.vitestManager.startVitest().then(() => options.onReady?.());
- }
+ private a11yStatusStore: TestManagerOptions['a11yStatusStore'];
- async handleConfigChange(config: StoreState['config'], previousConfig: StoreState['config']) {
- process.env.VITEST_STORYBOOK_CONFIG = JSON.stringify(config);
+ private testProviderStore: TestManagerOptions['testProviderStore'];
- if (config.coverage !== previousConfig.coverage) {
- try {
- await this.vitestManager.restartVitest({
- coverage: config.coverage,
- });
- } catch (e) {
- this.reportFatalError('Failed to change coverage configuration', e);
- }
- }
+ private onReady?: TestManagerOptions['onReady'];
+
+ private batchedTestCaseResults: {
+ storyId: string;
+ testResult: TestResult;
+ reports?: Report[];
+ }[] = [];
+
+ constructor(options: TestManagerOptions) {
+ this.store = options.store;
+ this.componentTestStatusStore = options.componentTestStatusStore;
+ this.a11yStatusStore = options.a11yStatusStore;
+ this.testProviderStore = options.testProviderStore;
+ this.onReady = options.onReady;
+
+ this.vitestManager = new VitestManager(this);
+
+ this.store.subscribe('TRIGGER_RUN', this.handleTriggerRunEvent.bind(this));
+ this.store.subscribe('CANCEL_RUN', this.handleCancelEvent.bind(this));
+
+ this.store
+ .untilReady()
+ .then(() =>
+ this.vitestManager.startVitest({ coverage: this.store.getState().config.coverage })
+ )
+ .then(() => this.onReady?.())
+ .catch((e) => {
+ this.reportFatalError('Failed to start Vitest', e);
+ });
}
- async handleWatchModeRequest(watching: boolean) {
- const coverage = this.store.getState().config.coverage ?? false;
-
- if (coverage) {
- try {
- if (watching) {
- // if watch mode is toggled on and coverage is already enabled, restart vitest without coverage to automatically disable it
- await this.vitestManager.restartVitest({ coverage: false });
- } else {
- // if watch mode is toggled off and coverage is already enabled, restart vitest with coverage to automatically re-enable it
- await this.vitestManager.restartVitest({ coverage });
+ async handleTriggerRunEvent(event: TriggerRunEvent) {
+ await this.runTestsWithState({
+ storyIds: event.payload.storyIds,
+ triggeredBy: event.payload.triggeredBy,
+ callback: async () => {
+ try {
+ await this.vitestManager.vitestRestartPromise;
+ await this.vitestManager.runTests(event.payload);
+ } catch (err) {
+ this.reportFatalError('Failed to run tests', err);
+ throw err;
}
- } catch (e) {
- this.reportFatalError('Failed to change watch mode while coverage was enabled', e);
- }
- }
+ },
+ });
}
- async handleRunRequest(payload: TestingModuleRunRequestPayload) {
+ async handleCancelEvent() {
try {
- if (payload.providerId !== TEST_PROVIDER_ID) {
- return;
- }
-
- const state = this.store.getState();
-
- /*
- If we're only running a subset of stories, we have to temporarily disable coverage,
- as a coverage report for a subset of stories is not useful.
- */
- const temporarilyDisableCoverage =
- state.config.coverage && !state.watching && (payload.storyIds ?? []).length > 0;
- if (temporarilyDisableCoverage) {
- await this.vitestManager.restartVitest({
- coverage: false,
- });
- } else {
- await this.vitestManager.vitestRestartPromise;
- }
+ this.store.setState((s) => ({
+ ...s,
+ cancelling: true,
+ }));
+ await this.vitestManager.cancelCurrentRun();
+ } catch (err) {
+ this.reportFatalError('Failed to cancel tests', err);
+ } finally {
+ this.store.setState((s) => ({
+ ...s,
+ cancelling: false,
+ }));
+ }
+ }
- this.selectedStoryCountForLastRun = payload.storyIds?.length ?? 0;
+ async runTestsWithState({
+ storyIds,
+ triggeredBy,
+ callback,
+ }: {
+ storyIds?: string[];
+ triggeredBy: RunTrigger;
+ callback: () => Promise<void>;
+ }) {
+ this.componentTestStatusStore.unset(storyIds);
+ this.a11yStatusStore.unset(storyIds);
- await this.vitestManager.runTests(payload);
+ this.store.setState((s) => ({
+ ...s,
+ currentRun: {
+ ...storeOptions.initialState.currentRun,
+ triggeredBy,
+ startedAt: Date.now(),
+ storyIds: storyIds,
+ config: s.config,
+ },
+ }));
+ // set the config at the start of a test run,
+ // so that changing the config during the test run does not affect the currently running test run
+ process.env.VITEST_STORYBOOK_CONFIG = JSON.stringify(this.store.getState().config);
- if (temporarilyDisableCoverage) {
- // Re-enable coverage if it was temporarily disabled because of a subset of stories was run
- await this.vitestManager.restartVitest({ coverage: state?.config.coverage });
+ await this.testProviderStore.runWithState(async () => {
+ await callback();
+ this.store.send({
+ type: 'TEST_RUN_COMPLETED',
+ payload: this.store.getState().currentRun,
+ });
+ if (this.store.getState().currentRun.unhandledErrors.length > 0) {
+ throw new Error('Tests completed but there are unhandled errors');
}
- } catch (e) {
- this.reportFatalError('Failed to run tests', e);
- }
+ });
greptile
logic: throwing error after sending TEST_RUN_COMPLETED could cause inconsistent state - consider sending error event instead
diff block
+import logging
+from typing import Any
+
+import structlog
+from django.core.management.base import BaseCommand
+from django.db.models import Q
+
+from posthog.rbac.migrations.rbac_team_migration import rbac_team_access_control_migration
+from posthog.rbac.migrations.rbac_feature_flag_migration import rbac_feature_flag_role_access_migration
+from posthog.models.organization import Organization
+from ee.models.rbac.organization_resource_access import OrganizationResourceAccess
+from ee.models.rbac.role import Role
+
+logger = structlog.get_logger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class Command(BaseCommand):
+ help = "Run RBAC migrations for specified organizations"
+
+ def add_arguments(self, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument(
+ "--org-ids",
+ type=str,
+ help="Comma-separated list of organization IDs",
+ )
+ group.add_argument(
+ "--backfill",
+ action="store_true",
+ help="Find and migrate all organizations that need the RBAC migration",
+ )
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help="Only show organizations that would be migrated without actually running the migrations",
+ )
+
+ def handle(self, *args, **options):
+ if options["org_ids"]:
+ org_ids_input = options["org_ids"]
+ # Parse comma-separated list of organization IDs
+ org_ids = [int(org_id.strip()) for org_id in org_ids_input.split(",")]
+ else: # backfill option
+ self.stdout.write("Finding organizations that need RBAC migration...")
+ org_ids = self.find_organizations_needing_migration()
+
+ if not org_ids:
+ self.stdout.write(self.style.SUCCESS("No organizations found that need RBAC migration."))
+ return
+
+ self.stdout.write(f"Found {len(org_ids)} organizations that need RBAC migration.")
+ for org_id in org_ids:
+ try:
+ org = Organization.objects.get(id=org_id)
+ self.stdout.write(f" - Organization {org_id}: {org.name}")
+ except Organization.DoesNotExist:
+ self.stdout.write(f" - Organization {org_id}: [Not Found]")
+
+ if options["dry_run"]:
+ self.stdout.write(self.style.WARNING("Dry run mode - no migrations were performed."))
+ return
+
+ # Run migrations
+ results = self.run_migrations_for_organizations(org_ids)
+
+ # Print summary to console
+ self.stdout.write(self.style.SUCCESS(f"RBAC Migration Summary:"))
+ self.stdout.write(f"Total organizations: {results['total']}")
+ self.stdout.write(self.style.SUCCESS(f"Successful migrations: {results['successful']}"))
+
+ if results["failed"] > 0:
+ self.stdout.write(self.style.ERROR(f"Failed migrations: {results['failed']}"))
+ else:
+ self.stdout.write(f"Failed migrations: {results['failed']}")
+
+ # Print detailed results
+ self.stdout.write("\nDetailed Results:")
+ for org_result in results["details"]:
+ org_id = org_result["organization_id"]
+ org_name = org_result.get("organization_name", "Unknown")
+
+ if org_result.get("error"):
+ self.stdout.write(self.style.ERROR(f"Organization {org_id} ({org_name}): {org_result['error']}"))
+ continue
+
+ team_success = org_result["team_migration"]["success"]
+ ff_success = org_result["feature_flag_migration"]["success"]
+
+ if team_success and ff_success:
+ self.stdout.write(self.style.SUCCESS(f"Organization {org_id} ({org_name}): All migrations successful"))
+ else:
+ self.stdout.write(self.style.WARNING(f"Organization {org_id} ({org_name}): Some migrations failed"))
+
+ if not team_success:
+ error = org_result["team_migration"]["error"] or "Unknown error"
+ self.stdout.write(self.style.ERROR(f" Team migration failed: {error}"))
+
+ if not ff_success:
+ error = org_result["feature_flag_migration"]["error"] or "Unknown error"
+ self.stdout.write(self.style.ERROR(f" Feature flag migration failed: {error}"))
+
+ def find_organizations_needing_migration(self) -> list[int]:
+ """
+ Find organizations that need RBAC migration based on the following criteria:
+ - Has a team with access_control = True
+ - Has an OrganizationResourceAccess row
+ - Has a Role with feature flag access settings
+
+ Returns:
+ List of organization IDs that need migration
+ """
+ # Find organizations with teams that have access_control = True
+ orgs_with_team_access_control = (
+ Organization.objects.filter(teams__access_control=True).values_list("id", flat=True).distinct()
+ )
+
+ # Find organizations with OrganizationResourceAccess rows
+ orgs_with_resource_access = OrganizationResourceAccess.objects.values_list(
+ "organization_id", flat=True
+ ).distinct()
+
+ # Find organizations with roles that have feature flag access
+ orgs_with_feature_flag_roles = (
+ Role.objects.filter(Q(feature_flags_access_level__isnull=False) | Q(feature_flag_role_access__isnull=False))
+ .values_list("organization_id", flat=True)
+ .distinct()
+ )
+
+ # Combine all organization IDs
+ all_org_ids = (
+ set(orgs_with_team_access_control) | set(orgs_with_resource_access) | set(orgs_with_feature_flag_roles)
+ )
+
+ return sorted(all_org_ids)
+
+ def run_migrations_for_organizations(self, organization_ids: list[int]) -> dict[str, Any]:
+ """
+ Run RBAC migrations for a list of organizations.
+
+ Args:
+ organization_ids: List of organization IDs to run migrations for
+
+ Returns:
+ Dictionary with summary of migration results
+ """
+ results = {"total": len(organization_ids), "successful": 0, "failed": 0, "details": []}
+
+ for org_id in organization_ids:
+ org_result = {
+ "organization_id": org_id,
+ "team_migration": {"success": False, "error": None},
+ "feature_flag_migration": {"success": False, "error": None},
+ }
+
+ # Verify organization exists
+ try:
+ org = Organization.objects.get(id=org_id)
+ org_result["organization_name"] = org.name
+ logger.info("Starting RBAC migrations", organization_id=org_id, organization_name=org.name)
+ except Organization.DoesNotExist:
+ error_msg = f"Organization with ID {org_id} does not exist"
+ org_result["error"] = error_msg
+ logger.exception(error_msg)
+ results["failed"] += 1
+ results["details"].append(org_result)
+ continue
+
+ # Run team access control migration
+ try:
+ rbac_team_access_control_migration(org_id)
+ org_result["team_migration"]["success"] = True
+ logger.info("Team access control migration successful", organization_id=org_id)
+ except Exception as e:
+ error_msg = str(e)
+ org_result["team_migration"]["error"] = error_msg
+ logger.error(
+ "Team access control migration failed", organization_id=org_id, error=error_msg, exc_info=True
+ )
greptile
logic: No transaction handling around migrations. If team migration fails, the organization could be left in an inconsistent state.
diff block
import SwiftUI
struct ContentView: View {
- @Environment(\.model) var model
+ @Environment(\.windowState) private var state
+
@Default(.panelWidth) var panelWidth
@Default(.isRegularApp) var isRegularApp
+ @ObservedObject private var accessibilityPermissionManager = AccessibilityPermissionManager.shared
+ @Default(.showOnboarding) var showOnboarding
static let idealWidth: CGFloat = 400
- static let bottomPadding: CGFloat = 100
-
- @State var screenHeight: CGFloat = NSScreen.main?.visibleFrame.height ?? 0
-
- var maxHeight: CGFloat {
- guard screenHeight != 0 else { return 0 }
-
- return screenHeight - ContentView.bottomPadding
- }
-
- var toolbarPaddingTop: CGFloat {
- isRegularApp ? -28 : 0
- }
+ static let bottomPadding: CGFloat = 0
- var showFileImporterBinding: Binding<Bool> {
+ private var showFileImporterBinding: Binding<Bool> {
Binding(
- get: { self.model.showFileImporter },
- set: { self.model.showFileImporter = $0 }
+ get: { state.showFileImporter },
+ set: { state.showFileImporter = $0 }
)
}
+
+ private var shouldShowOnboarding: Bool {
+ let accessibilityPermissionGranted = accessibilityPermissionManager.accessibilityPermissionStatus == .granted
+ return !accessibilityPermissionGranted && showOnboarding
+ }
var body: some View {
- ZStack(alignment: .top) {
- VStack(spacing: 0) {
- Toolbar()
- .padding(.top, toolbarPaddingTop)
- PromptDivider()
- ChatView()
+ HStack(spacing: -TetheredButton.width / 2) {
+ if isRegularApp { TetheredButton() }
+
+ ZStack(alignment: .top) {
+ if shouldShowOnboarding {
+ VStack(spacing: 0) {
+ if state.showChatView {
+ OnboardingAccessibility().transition(.opacity)
+ } else {
+ Spacer()
+ }
+ }
+ .frame(width: TetherAppsManager.minOnitWidth, height: .infinity)
+ .background(Color.black)
+ } else {
+ VStack(spacing: 0) {
+ if !isRegularApp { Toolbar() }
+ else { Spacer().frame(height: 38) }
+
+ PromptDivider()
+
+ if !isRegularApp { ChatView() }
+ else if state.showChatView { ChatView().transition(.opacity) }
+ else { Spacer() }
greptile
logic: ChatView is conditionally rendered in two different places with different logic - this could lead to inconsistent states
diff block
+import { useState, useEffect } from "react";
+import { LocalStorage, environment } from "@raycast/api";
+import { showFailureToast } from "@raycast/utils";
+import { log } from "../utils";
+import { SpotlightSearchPreferences } from "../types";
+import { userInfo } from "os";
+
+/**
+ * Hook for managing user preferences
+ */
+export function usePreferences() {
+ const [searchScope, setSearchScope] = useState<string>(userInfo().homedir);
+ const [isShowingDetail, setIsShowingDetail] = useState<boolean>(false);
+ const [showNonCloudLibraryPaths, setShowNonCloudLibraryPaths] = useState<boolean>(false);
+ const [hasCheckedPreferences, setHasCheckedPreferences] = useState<boolean>(false);
+
+ // Load preferences on mount
+ useEffect(() => {
+ const loadPreferences = async () => {
+ try {
+ log("debug", "usePreferences", "Loading preferences");
+
+ // Get preferences from LocalStorage
+ const maybePreferences = await LocalStorage.getItem(`${environment.extensionName}-preferences`);
+
+ if (maybePreferences) {
+ try {
+ const preferences = JSON.parse(maybePreferences as string);
+
+ // Update state with loaded preferences
+ setSearchScope(preferences?.searchScope || userInfo().homedir);
+ setIsShowingDetail(preferences?.isShowingDetail !== undefined ? preferences.isShowingDetail : true);
+ setShowNonCloudLibraryPaths(preferences?.showNonCloudLibraryPaths || false);
+
+ log("debug", "usePreferences", "Loaded preferences", {
+ searchScope: preferences?.searchScope,
+ isShowingDetail: preferences?.isShowingDetail,
+ showNonCloudLibraryPaths: preferences?.showNonCloudLibraryPaths,
+ });
+ } catch (error) {
+ log("error", "usePreferences", "Error parsing preferences from storage", {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ }
greptile
logic: Error is caught but preferences aren't reset to defaults. Could leave app in inconsistent state if preferences are corrupted.
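A small TypeScript sketch of the fallback the comment suggests (a hypothetical parsePreferences helper and Preferences shape, not the extension's actual code): treat corrupted storage as "use defaults" so every preference ends up with a known value:

```ts
// Illustrative helper (not the extension's code): parse stored preferences and
// fall back to a complete set of defaults when the stored value is corrupted,
// so the UI never runs with a partially-applied preference state.
interface Preferences {
  searchScope: string;
  isShowingDetail: boolean;
  showNonCloudLibraryPaths: boolean;
}

function parsePreferences(raw: string | undefined, defaults: Preferences): Preferences {
  if (!raw) return defaults;
  try {
    const parsed = JSON.parse(raw) as Partial<Preferences>;
    return {
      searchScope: parsed.searchScope ?? defaults.searchScope,
      isShowingDetail: parsed.isShowingDetail ?? defaults.isShowingDetail,
      showNonCloudLibraryPaths: parsed.showNonCloudLibraryPaths ?? defaults.showNonCloudLibraryPaths,
    };
  } catch {
    // Corrupted JSON: return defaults rather than keeping whatever happened to load earlier.
    return defaults;
  }
}
```

The hook could call a helper like this instead of parsing inline, so a corrupted payload can never leave some preferences updated and others stale.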
diff block
+import LRU from 'lru-cache'
+import { Counter } from 'prom-client'
+
+export interface TokenRestrictionCache {
+ get(key: string): string | null | undefined
+ set(key: string, value: string | null): void
+ clear(): void
+}
+
+const tokenRestrictionCacheRequests = new Counter({
+ name: 'token_restriction_cache_requests_total',
+ help: 'Total number of token restriction cache requests',
+ labelNames: ['cache_type', 'result'] as const,
+})
+
+export class LRUTokenRestrictionCache implements TokenRestrictionCache {
+ private hitCache: LRU<string, string>
+ private missCache: LRU<string, boolean>
+
+ constructor(options: { hitCacheSize?: number; missCacheSize?: number; ttlMs?: number }) {
+ const { hitCacheSize = 1000, missCacheSize = 1000, ttlMs = 1000 * 60 * 60 * 24 } = options
+
+ this.hitCache = new LRU<string, string>({
+ max: hitCacheSize,
+ maxAge: ttlMs,
+ })
+
+ this.missCache = new LRU<string, boolean>({
+ max: missCacheSize,
+ maxAge: ttlMs,
+ })
+ }
+
+ get(key: string): string | null | undefined {
+ const cachedValue = this.hitCache.get(key)
+ if (cachedValue !== undefined) {
+ tokenRestrictionCacheRequests.inc({ cache_type: 'token-restriction-cache', result: 'hit' })
+ return cachedValue
+ }
+
+ if (this.missCache.has(key)) {
+ tokenRestrictionCacheRequests.inc({ cache_type: 'token-restriction-cache', result: 'hit' })
+ return null
+ }
+
+ tokenRestrictionCacheRequests.inc({ cache_type: 'token-restriction-cache', result: 'miss' })
+ return undefined
+ }
greptile
logic: The get() method could potentially return undefined for a key that was previously set to null, if the miss cache entry expired but the hit cache entry hasn't. This creates an inconsistent state.
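One way to remove that divergence (a sketch assuming the same lru-cache version and API shown above, not the project's actual design) is to keep hits and known misses in a single cache, storing null for misses so both kinds of entry share one eviction and TTL policy:

```ts
import LRU from 'lru-cache'

// Sketch: a single cache whose values are either the restriction string (hit)
// or null (known miss). undefined from get() means "never cached".
export class SingleCacheTokenRestrictionCache {
  private cache: LRU<string, string | null>

  constructor(options: { cacheSize?: number; ttlMs?: number } = {}) {
    const { cacheSize = 2000, ttlMs = 1000 * 60 * 60 * 24 } = options
    this.cache = new LRU<string, string | null>({ max: cacheSize, maxAge: ttlMs })
  }

  get(key: string): string | null | undefined {
    // A hit and its miss marker can no longer expire independently,
    // because there is only one entry per key.
    return this.cache.get(key)
  }

  set(key: string, value: string | null): void {
    this.cache.set(key, value)
  }

  clear(): void {
    this.cache.reset()
  }
}
```

The trade-off is that hits and known misses now share one capacity limit, so the maximum size may need retuning.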
diff block
'use client';
import React, { useEffect, useMemo, useState } from 'react';
-import { useMemoizedFn, useMount, useUnmount } from '@/hooks';
+import { useMemoizedFn } from '@/hooks';
import { IDataResult } from '@/api/asset_interfaces';
import { useMetricResultsLayout } from './useMetricResultsLayout';
-import { useChatLayoutContextSelector } from '@/layouts/ChatLayout/ChatLayoutContext';
import { AppSplitterRef } from '@/components/ui/layouts';
import { AppVerticalCodeSplitter } from '@/components/features/layouts/AppVerticalCodeSplitter';
import { useMetricRunSQL } from './useMetricRunSQL';
import { useGetMetric, useGetMetricData } from '@/api/buster_rest/metrics';
const autoSaveId = 'metric-view-results';
-export const MetricViewResultsController: React.FC<{ metricId: string }> = React.memo(
- ({ metricId }) => {
- const appSplitterRef = React.useRef<AppSplitterRef>(null);
- const selectedFileViewSecondary = useChatLayoutContextSelector(
- (x) => x.selectedFileViewSecondary
- );
- const containerRef = React.useRef<HTMLDivElement>(null);
+export const MetricViewResultsController: React.FC<{
+ metricId: string;
+ useSQL: boolean;
+}> = React.memo(({ metricId, useSQL }) => {
+ const appSplitterRef = React.useRef<AppSplitterRef>(null);
+ const containerRef = React.useRef<HTMLDivElement>(null);
- const {
- runSQL,
- resetRunSQLData,
- saveSQL,
- saveMetricError,
- runSQLError,
- isSavingMetric,
- isRunningSQL
- } = useMetricRunSQL();
+ const {
+ runSQL,
+ resetRunSQLData,
+ saveSQL,
+ saveMetricError,
+ runSQLError,
+ isSavingMetric,
+ isRunningSQL
+ } = useMetricRunSQL();
- const { data: metric } = useGetMetric(
- { id: metricId },
- {
- select: ({ sql, data_source_id }) => ({
- sql,
- data_source_id
- })
- }
- );
- const { data: metricData, isFetched: isFetchedInitialData } = useGetMetricData(
- { id: metricId },
- { enabled: false }
- );
+ const { data: metric } = useGetMetric(
+ { id: metricId },
+ {
+ select: ({ sql, data_source_id }) => ({
+ sql,
+ data_source_id
+ })
+ }
+ );
+ const { data: metricData, isFetched: isFetchedInitialData } = useGetMetricData(
+ { id: metricId },
+ { enabled: false }
+ );
- const [sql, setSQL] = useState(metric?.sql || '');
+ const [sql, setSQL] = useState(metric?.sql || '');
- const dataSourceId = metric?.data_source_id || '';
- const data: IDataResult = metricData?.dataFromRerun || metricData?.data || null;
+ const dataSourceId = metric?.data_source_id || '';
+ const data: IDataResult = metricData?.dataFromRerun || metricData?.data || null;
- const disableSave = useMemo(() => {
- return !sql || isRunningSQL || sql === metric?.sql;
- }, [sql, isRunningSQL, metric?.sql]);
+ const disableSave = useMemo(() => {
+ return !sql || isRunningSQL || sql === metric?.sql;
+ }, [sql, isRunningSQL, metric?.sql]);
- const onRunQuery = useMemoizedFn(async () => {
- try {
- const res = await runSQL({
- dataSourceId,
- sql,
- metricId
- });
+ const onRunQuery = useMemoizedFn(async () => {
+ try {
+ const res = await runSQL({
+ dataSourceId,
+ sql,
+ metricId
+ });
- if (res && res.data && res.data.length > 0) {
- const data = res.data;
- const headerHeight = 28.1;
- const heightOfRow = 28.1;
- const heightOfDataContainer = headerHeight + heightOfRow * (data.length || 0);
- const containerHeight = containerRef.current?.clientHeight || 0;
- const maxHeight = Math.floor(containerHeight * 0.6);
- const finalHeight = Math.min(heightOfDataContainer, maxHeight) + 12;
- appSplitterRef.current?.setSplitSizes(['auto', `${finalHeight}px`]);
- }
- } catch (error) {
- //
+ if (res && res.data && res.data.length > 0) {
+ const data = res.data;
+ const headerHeight = 28.1;
+ const heightOfRow = 28.1;
+ const heightOfDataContainer = headerHeight + heightOfRow * (data.length || 0);
+ const containerHeight = containerRef.current?.clientHeight || 0;
+ const maxHeight = Math.floor(containerHeight * 0.6);
+ const finalHeight = Math.min(heightOfDataContainer, maxHeight) + 12;
+ appSplitterRef.current?.setSplitSizes(['auto', `${finalHeight}px`]);
}
- });
+ } catch (error) {
+ //
+ }
+ });
- const onSaveSQL = useMemoizedFn(async () => {
- await saveSQL({
- metricId,
- sql,
- dataSourceId
- });
+ const onSaveSQL = useMemoizedFn(async () => {
+ await saveSQL({
+ metricId,
+ sql,
+ dataSourceId
});
+ });
greptile
logic: onSaveSQL doesn't handle errors, which could leave the component in an inconsistent state if the save fails.
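A minimal sketch of the kind of guard the comment asks for, using only the values already in scope in the component; the rollback and logging choices are illustrative, not the project's actual convention:

```typescript
const onSaveSQL = useMemoizedFn(async () => {
  try {
    await saveSQL({ metricId, sql, dataSourceId });
  } catch (error) {
    // Surface the failure instead of silently leaving edited SQL that looks saved.
    console.error('Failed to save SQL', error);
    // Optionally roll the editor back to the last persisted SQL so UI and server agree.
    setSQL(metric?.sql || '');
  }
});
```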
diff block
import { create } from 'zustand'
import { persist } from 'zustand/middleware'
+export type SidebarMode = 'expanded' | 'collapsed' | 'hover'
+
interface SidebarState {
- isCollapsed: boolean
- toggleCollapsed: () => void
- setCollapsed: (collapsed: boolean) => void
+ mode: SidebarMode
+ isExpanded: boolean
greptile
style: Having both `mode` and `isExpanded` could lead to inconsistent states. Consider storing only `mode` and deriving the expanded state from it.
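A hedged sketch of the single-source-of-truth approach: store only mode and derive the expanded flag where it is needed (store and selector names are illustrative):

```typescript
import { create } from 'zustand'
import { persist } from 'zustand/middleware'

export type SidebarMode = 'expanded' | 'collapsed' | 'hover'

interface SidebarState {
  mode: SidebarMode
  setMode: (mode: SidebarMode) => void
}

export const useSidebarStore = create<SidebarState>()(
  persist(
    (set) => ({
      mode: 'expanded',
      setMode: (mode) => set({ mode }),
    }),
    { name: 'sidebar-state' }
  )
)

// Derived at the call site, so it can never drift out of sync with `mode`.
export const useSidebarExpanded = () =>
  useSidebarStore((state) => state.mode === 'expanded')
```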
diff block
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ [key]: value }),
})
-
+
if (!response.ok) {
throw new Error(`Failed to update setting: ${key}`)
}
-
+
set({ error: null })
-
+
lastLoadTime = Date.now()
errorRetryCount = 0
} catch (error) {
logger.error(`Error updating setting ${key}:`, error)
set({ error: error instanceof Error ? error.message : 'Unknown error' })
-
+
if (errorRetryCount < MAX_ERROR_RETRIES) {
errorRetryCount++
logger.debug(`Retry attempt ${errorRetryCount} after error`)
get().loadSettings(true)
} else {
greptile
style: Potential race condition: retrying loadSettings while updateSetting is still in progress could lead to an inconsistent state.
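One hedged way to avoid the overlap is to track in-flight writes and skip the automatic reload while any are pending; the helper below is a generic, hypothetical sketch of that guard rather than the store's real API:

```typescript
// Generic sketch of a "don't reload while a write is in flight" guard.
let inFlightWrites = 0

export async function withWriteGuard<T>(write: () => Promise<T>): Promise<T> {
  inFlightWrites++
  try {
    return await write()
  } finally {
    inFlightWrites--
  }
}

export function canSafelyReload(): boolean {
  // Only allow the retry/reload path when no updateSetting call is pending,
  // so the reload cannot clobber state that a write is about to change.
  return inFlightWrites === 0
}
```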
diff block
+import { useUpsertObjectFilterDropdownCurrentFilter } from '@/object-record/object-filter-dropdown/hooks/useUpsertObjectFilterDropdownCurrentFilter';
+import { fieldMetadataItemUsedInDropdownComponentSelector } from '@/object-record/object-filter-dropdown/states/fieldMetadataItemUsedInDropdownComponentSelector';
+import { objectFilterDropdownCurrentRecordFilterComponentState } from '@/object-record/object-filter-dropdown/states/objectFilterDropdownCurrentRecordFilterComponentState';
+import { selectedOperandInDropdownComponentState } from '@/object-record/object-filter-dropdown/states/selectedOperandInDropdownComponentState';
+import { useCreateEmptyRecordFilterFromFieldMetadataItem } from '@/object-record/record-filter/hooks/useCreateEmptyRecordFilterFromFieldMetadataItem';
+import { RecordFilter } from '@/object-record/record-filter/types/RecordFilter';
+import { RecordFilterOperand } from '@/object-record/record-filter/types/RecordFilterOperand';
+import { useRecoilComponentValueV2 } from '@/ui/utilities/state/component-state/hooks/useRecoilComponentValueV2';
+import { useSetRecoilComponentStateV2 } from '@/ui/utilities/state/component-state/hooks/useSetRecoilComponentStateV2';
+import { isDefined } from 'twenty-shared/utils';
+
+export const useApplyObjectFilterDropdownOperand = () => {
+ const objectFilterDropdownCurrentRecordFilter = useRecoilComponentValueV2(
+ objectFilterDropdownCurrentRecordFilterComponentState,
+ );
+
+ const setSelectedOperandInDropdown = useSetRecoilComponentStateV2(
+ selectedOperandInDropdownComponentState,
+ );
+
+ const objectFilterDropdownFilterIsCreated = isDefined(
+ objectFilterDropdownCurrentRecordFilter,
+ );
+
+ const fieldMetadataItemUsedInDropdown = useRecoilComponentValueV2(
+ fieldMetadataItemUsedInDropdownComponentSelector,
+ );
+
+ const { upsertObjectFilterDropdownCurrentFilter } =
+ useUpsertObjectFilterDropdownCurrentFilter();
+
+ const { createEmptyRecordFilterFromFieldMetadataItem } =
+ useCreateEmptyRecordFilterFromFieldMetadataItem();
+
+ const applyObjectFilterDropdownOperand = (
+ newOperand: RecordFilterOperand,
+ ) => {
+ const isValuelessOperand = [
+ RecordFilterOperand.IsEmpty,
+ RecordFilterOperand.IsNotEmpty,
+ RecordFilterOperand.IsInPast,
+ RecordFilterOperand.IsInFuture,
+ RecordFilterOperand.IsToday,
+ ].includes(newOperand);
+
+ if (objectFilterDropdownFilterIsCreated) {
+ const newCurrentRecordFilter = {
+ ...objectFilterDropdownCurrentRecordFilter,
+ operand: newOperand,
+ } satisfies RecordFilter;
+
+ upsertObjectFilterDropdownCurrentFilter(newCurrentRecordFilter);
+ } else if (isValuelessOperand) {
greptile
logic: There is no handling for non-valueless operands when the filter has not been created yet, which could lead to an inconsistent state when selecting operands that require values.
diff block
saved_query = await get_saved_query(team, model.label)
job = await start_job_modeling_run(team, workflow_id, workflow_run_id, saved_query)
-
- key, delta_table, job_id = await materialize_model(model.label, team, saved_query, job)
+ try:
+ key, delta_table, job_id = await materialize_model(model.label, team, saved_query, job)
+ except DataModelingCancelledException:
+ # don't do anything, just continue
+ pass
greptile
logic: The empty pass block after DataModelingCancelledException could leave the job in an inconsistent state; the job status should be updated to cancelled instead.
diff block
let mut created_files = vec![];
let mut failed_files = vec![];
- // Create futures for concurrent processing
- let process_futures = files
- .into_iter()
- .map(|file| {
- let tool_call_id_clone = tool_call_id.clone();
- let user_id = self.agent.get_user_id();
-
- async move {
- let result = process_metric_file(
- tool_call_id_clone,
- file.name.clone(),
- file.yml_content.clone(),
- &user_id,
- )
- .await;
-
- (file.name.clone(), result)
- }
- })
- .collect::<Vec<_>>();
-
- // Wait for all futures to complete
- let results = join_all(process_futures).await;
-
- // Process results
- let mut metric_records = vec![];
- let mut metric_ymls = vec![];
- let mut results_vec = vec![];
+ let data_source_id = match self.agent.get_state_value("data_source_id").await {
+ Some(Value::String(id_str)) => Uuid::parse_str(&id_str).map_err(|e| anyhow!("Invalid data source ID format: {}", e))?,
+ Some(_) => bail!("Data source ID is not a string"),
+ None => bail!("Data source ID not found in agent state"),
+ };
- for (file_name, result) in results {
+ // Collect results from processing each file concurrently
+ let process_futures = files.into_iter().map(|file| {
+ let tool_call_id_clone = tool_call_id.clone();
+ let user_id = self.agent.get_user_id();
+ async move {
+ let result = process_metric_file(
+ tool_call_id_clone,
+ file.name.clone(),
+ file.yml_content.clone(),
+ data_source_id,
+ &user_id,
+ )
+ .await;
+ (file.name, result)
+ }
+ });
+ let processed_results = join_all(process_futures).await;
+
+ // Separate successful from failed processing
+ let mut successful_processing: Vec<(
+ MetricFile,
+ MetricYml,
+ String,
+ Vec<IndexMap<String, DataType>>,
+ Vec<Uuid>
+ )> = Vec::new();
+ for (file_name, result) in processed_results {
match result {
- Ok((metric_file, metric_yml, message, results)) => {
- metric_records.push(metric_file);
- metric_ymls.push(metric_yml);
- results_vec.push((message, results));
+ Ok((metric_file, metric_yml, message, results, validated_dataset_ids)) => {
+ successful_processing.push((
+ metric_file,
+ metric_yml,
+ message,
+ results,
+ validated_dataset_ids,
+ ));
}
Err(e) => {
- failed_files.push(FailedFileCreation { name: file_name, error: e.to_string() });
+ failed_files.push(FailedFileCreation {
+ name: file_name,
+ error: e.to_string(),
+ });
}
}
}
- // Second pass - bulk insert records
- let mut conn = match get_pg_pool().get().await {
- Ok(conn) => conn,
- Err(e) => return Err(anyhow!(e)),
- };
+ let metric_records: Vec<MetricFile> = successful_processing.iter().map(|(mf, _, _, _, _)| mf.clone()).collect();
+ let all_validated_dataset_ids: Vec<(Uuid, i32, Vec<Uuid>)> = successful_processing
+ .iter()
+ .map(|(mf, _, _, _, ids)| (mf.id, 1, ids.clone()))
+ .collect();
+
+ let mut conn = get_pg_pool().get().await?;
- // Insert metric files
if !metric_records.is_empty() {
- match insert_into(metric_files::table)
+ if let Err(e) = insert_into(metric_files::table)
.values(&metric_records)
.execute(&mut conn)
.await
{
- Ok(_) => {
- // Get the user ID from the agent state
- let user_id = self.agent.get_user_id();
-
- // Create asset permissions for each metric file
- let now = Utc::now();
- let asset_permissions: Vec<AssetPermission> = metric_records
- .iter()
- .map(|record| AssetPermission {
- identity_id: user_id,
- identity_type: IdentityType::User,
- asset_id: record.id,
- asset_type: AssetType::MetricFile,
- role: AssetPermissionRole::Owner,
+ failed_files.extend(metric_records.iter().map(|r| FailedFileCreation {
+ name: r.file_name.clone(),
+ error: format!("Failed to create metric file record: {}", e),
+ }));
+ } else {
+ let user_id = self.agent.get_user_id();
+ let now = Utc::now();
+
+ let asset_permissions: Vec<AssetPermission> = metric_records
+ .iter()
+ .map(|record| AssetPermission {
+ identity_id: user_id,
+ identity_type: IdentityType::User,
+ asset_id: record.id,
+ asset_type: AssetType::MetricFile,
+ role: AssetPermissionRole::Owner,
+ created_at: now,
+ updated_at: now,
+ deleted_at: None,
+ created_by: user_id,
+ updated_by: user_id,
+ })
+ .collect();
+ if let Err(e) = insert_into(asset_permissions::table)
+ .values(&asset_permissions)
+ .execute(&mut conn)
+ .await
+ {
+ tracing::error!("Error inserting asset permissions: {}", e);
+ }
greptile
logic: Asset permission errors should be propagated to failed_files, since only logging them could leave metrics in an inconsistent state without proper ownership.
suggested fix
tracing::error!("Error inserting asset permissions: {}", e);
failed_files.extend(metric_records.iter().map(|r| FailedFileCreation {
name: r.file_name.clone(),
+ error: format!("Failed to set asset permissions: {}", e),
}));
}
diff block
+import { Button } from "@/components/common";
+import { IS_DESKTOP } from "@/constants";
+import { api, calculateTokenExpirationTimestamp } from "@/services";
+import { Keytar } from "@/services/keytar.service";
+import type { User } from "@/types";
+import { useMutation, useQuery } from "@tanstack/react-query";
+import Link from "next/link";
+import { useRouter } from "next/router";
+import { useCallback, useEffect } from "react";
+import { setCookie } from "typescript-cookie";
+
+export default function Home() {
+ const router = useRouter();
+
+ const { data, isLoading, refetch } = useQuery<User | null>({
+ queryKey: ["me"],
+ queryFn: () =>
+ api.get<User>("profile/me", { credentials: "include" }).json(),
+ initialData: null,
+ });
+
+ const { mutateAsync } = useMutation({
+ mutationFn: () =>
+ api.post("auth/logout", { credentials: "include" }).json(),
+ });
+
+ const { mutateAsync: createSession } = useMutation({
+ mutationFn: ({
+ accessToken,
+ refreshToken,
+ }: {
+ accessToken: string;
+ refreshToken: string;
+ }) =>
+ api
+ .post("auth/session", {
+ json: { accessToken, refreshToken },
+ credentials: "include",
+ })
+ .json(),
+ });
+
+ const authorizePayload = useCallback(
+ async (payload: string) => {
+ const { accessToken, refreshToken, expiresIn } = JSON.parse(
+ atob(payload)
+ );
+
+ if (IS_DESKTOP) {
+ const accessTokenKeytar = new Keytar("access-token");
+ const refreshTokenKeytar = new Keytar("refresh-token");
+
+ await Promise.all([
+ accessTokenKeytar.savePassword(accessToken),
+ refreshTokenKeytar.savePassword(refreshToken),
+ ]);
+ }
+
+ return createSession({
+ accessToken,
+ refreshToken,
+ }).then(() => {
+ setCookie(
+ "tokenExpirationTimestamp",
+ calculateTokenExpirationTimestamp(expiresIn).toString()
+ );
+
+ refetch();
+ });
+ },
+ [createSession, refetch]
+ );
+
+ useEffect(() => {
+ const payload = router.query?.payload;
+
+ if (payload) {
+ authorizePayload(payload as string).then(() => {
+ router.replace(router.pathname, undefined, { shallow: true });
+ });
+ }
greptile
logic: The authorizePayload promise rejection is not handled, which could leave the user in an inconsistent state.
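A minimal sketch adding rejection handling in the effect so a failed authorization does not silently leave a half-initialized session; the logging and the choice to still strip the payload from the URL are illustrative:

```typescript
useEffect(() => {
  const payload = router.query?.payload;

  if (payload) {
    authorizePayload(payload as string)
      .then(() => {
        router.replace(router.pathname, undefined, { shallow: true });
      })
      .catch((error) => {
        // Without this, a malformed payload or failed session call is silently swallowed.
        console.error("Failed to authorize payload", error);
        // Still strip the payload so a broken token isn't retried on every render.
        router.replace(router.pathname, undefined, { shallow: true });
      });
  }
}, [router, authorizePayload]);
```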
diff block
+import "@/styles/globals.scss";
+import type { AppProps } from "next/app";
+import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
+import { Sidebar } from "@/layouts/sidebar/sidebar";
+import { useCallback, useEffect } from "react";
+import { IS_DESKTOP } from "@/constants";
+import { Keytar } from "@/services";
+import type { Auth } from "@/types";
+import { setCookie } from "typescript-cookie";
+
+const queryClient = new QueryClient();
+
+export default function App({ Component, pageProps }: AppProps) {
+ const importLegacyAuth = useCallback(async () => {
+ if (IS_DESKTOP) {
+ const accessTokenKeytar = new Keytar("access-token");
+ const refreshTokenKeytar = new Keytar("refresh-token");
+
+ if (await refreshTokenKeytar.getPassword()) {
+ return;
+ }
greptile
logic: Returning early when only the refresh token exists could leave the access token in an inconsistent state if it was never saved.
suggested fix
+ if (await refreshTokenKeytar.getPassword() && await accessTokenKeytar.getPassword()) {
return;
}
diff block
})
}
+/**
+ * Handles workspace transition state tracking
+ * @param isTransitioning Whether workspace is currently transitioning
+ */
+function setWorkspaceTransitioning(isTransitioning: boolean): void {
+ isWorkspaceTransitioning = isTransitioning;
+
+ // Set a safety timeout to prevent permanently stuck in transition state
+ if (isTransitioning) {
+ setTimeout(() => {
+ if (isWorkspaceTransitioning) {
+ logger.warn('Forcing workspace transition to complete due to timeout');
+ isWorkspaceTransitioning = false;
+ }
+ }, TRANSITION_TIMEOUT);
+ }
greptile
style: The timeout that force-clears the transition flag could leave the system in an inconsistent state if a transition is actually still in progress. Consider adding additional checks or using a more robust state-machine approach.
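A hedged sketch of one such check, a generation counter: each new transition invalidates the previous safety timeout, so a stale timeout can only clear the flag for the transition that actually armed it (the names and timeout value are hypothetical):

```typescript
// Hypothetical generation counter so a stale timeout cannot clear a newer transition.
let transitionGeneration = 0;
let isWorkspaceTransitioning = false;
const TRANSITION_TIMEOUT = 10_000;

function setWorkspaceTransitioning(isTransitioning: boolean): void {
  isWorkspaceTransitioning = isTransitioning;
  const generation = ++transitionGeneration;

  if (isTransitioning) {
    setTimeout(() => {
      // Only force-complete if no newer transition (or completion) happened since this was armed.
      if (isWorkspaceTransitioning && generation === transitionGeneration) {
        console.warn('Forcing workspace transition to complete due to timeout');
        isWorkspaceTransitioning = false;
      }
    }, TRANSITION_TIMEOUT);
  }
}
```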
diff block
return (
<div
- className={`flex items-center px-2 py-1 h-7 hover:bg-gray-100 rounded-sm cursor-pointer group ${isSelectable ? "" : "opacity-50 pointer-events-none"}`}
+ className={`flex items-center px-2 py-1 h-7 hover:bg-gray-100 rounded-sm cursor-pointer group ${isSelectable || isSelected ? "" : "opacity-50 pointer-events-none"}`}
greptile
logic: Allowing interaction when isSelected but not isSelectable could lead to inconsistent state management: the item could be deselected even though it shouldn't be selectable.
```suggestion
+ className={`flex items-center px-2 py-1 h-7 hover:bg-gray-100 rounded-sm cursor-pointer group ${isSelectable ? "" : "opacity-50 pointer-events-none"}`}
```
diff block
+import { createStore } from "zustand";
+import { v4 as uuidV4 } from "uuid";
+import { FacetDto, FacetOptionDto, FacetsConfig } from "../models";
+
+export type FacetState = {
+ facetsConfig: FacetsConfig | null;
+ setFacetsConfig: (facetsConfig: FacetsConfig) => void;
+
+ facets: FacetDto[] | null;
+ setFacets: (facets: FacetDto[]) => void;
+
+ facetOptions: Record<string, FacetOptionDto[]> | null;
+ setFacetOptions: (facetOptions: Record<string, FacetOptionDto[]>) => void;
+
+ facetOptionsLoadingState: Record<string, string>;
+ setFacetOptionsLoadingState: (loadingState: Record<string, string>) => void;
+
+ queriesState: {
+ facetOptionQueries: Record<string, string> | null;
+ filterCel: string | null;
+ };
+ setQueriesState: (
+ filterCel: string,
+ facetOptionQueries: Record<string, string>
+ ) => void;
+
+ facetsState: Record<string, any>;
+
+ patchFacetsState: (facetsStatePatch: Record<string, any>) => void;
+ setFacetState: (facetId: string, state: any) => void;
+
+ areFacetOptionsHandled: boolean;
+ setAreFacetOptionsHandled: (areFacetOptionsHandled: boolean) => void;
+
+ facetsStateRefreshToken: string | null;
+
+ areQueryparamsSet: boolean;
+ setAreQueryparamsSet: (areQueryparamsSet: boolean) => void;
+
+ isInitialStateHandled: boolean;
+ setIsInitialStateHandled: (isInitialStateHandled: boolean) => void;
+
+ clearFilters: () => void;
+
+ changedFacetId: string | null;
+ setChangedFacetId: (facetId: string | null) => void;
+
+ areOptionsReLoading: boolean;
+ setAreOptionsReLoading: (isLoading: boolean) => void;
+
+ areOptionsLoading: boolean;
+ setAreOptionsLoading: (isLoading: boolean) => void;
+};
+
+export const createFacetStore = () =>
+ createStore<FacetState>((set, state) => ({
+ facetsConfig: null,
+ setFacetsConfig: (facetsConfig: FacetsConfig) => set({ facetsConfig }),
+
+ facets: null,
+ setFacets: (facets: FacetDto[]) => set({ facets }),
+
+ facetOptions: null,
+ setFacetOptions: (facetOptions: Record<string, FacetOptionDto[]>) =>
+ set({ facetOptions }),
+
+ facetOptionsLoadingState: {},
+ setFacetOptionsLoadingState: (loadingState: Record<string, string>) =>
+ set({ facetOptionsLoadingState: loadingState }),
+
+ queriesState: {
+ facetOptionQueries: null,
+ filterCel: null,
+ },
+ setQueriesState: (filterCel, facetOptionQueries) =>
+ set({
+ queriesState: {
+ filterCel,
+ facetOptionQueries,
+ },
+ }),
+
+ facetsState: {},
+ patchFacetsState: (facetsStatePatch) => {
+ set({
+ // So that it only triggers refresh when facetsState is patched once
+ facetsStateRefreshToken: state().facetsStateRefreshToken || uuidV4(),
+ facetsState: {
+ ...(state().facetsState || {}),
+ ...facetsStatePatch,
+ },
+ });
+ },
+ setFacetState(facetId, facetState) {
+ set({
+ // So that it only triggers refresh when facetsState is changed once (option is selected\deselected by user)
+ facetsStateRefreshToken: uuidV4(),
+ facetsState: {
+ ...(state().facetsState || {}),
+ [facetId]: facetState,
+ },
+ });
+ },
+
+ areFacetOptionsHandled: false,
+ setAreFacetOptionsHandled: (areFacetOptionsHandled) =>
+ set({
+ areFacetOptionsHandled,
+ }),
+
+ facetsStateRefreshToken: null,
+
+ areQueryparamsSet: false,
+ setAreQueryparamsSet: (areQueryparamsSet: boolean) =>
+ set({ areQueryparamsSet }),
+
+ isInitialStateHandled: false,
+ setIsInitialStateHandled: (isInitialStateHandled: boolean) =>
+ set({ isInitialStateHandled }),
+
+ clearFilters: () => {
+ return set({
+ isInitialStateHandled: false,
+ facetsState: {},
+ facetsStateRefreshToken: uuidV4(),
+ areFacetOptionsHandled: false,
+ });
+ },
greptile
logic: clearFilters resets isInitialStateHandled but not areQueryparamsSet, which may cause an inconsistent state during reinitialization.
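A minimal sketch of the symmetric reset, using only fields already declared in FacetState; whether query params should in fact be re-applied after clearing is the store owner's call:

```typescript
clearFilters: () => {
  return set({
    isInitialStateHandled: false,
    areQueryparamsSet: false, // reset alongside isInitialStateHandled so reinitialization starts clean
    facetsState: {},
    facetsStateRefreshToken: uuidV4(),
    areFacetOptionsHandled: false,
  });
},
```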
diff block
+// Test script for simulating the tenant invitation process
+const admin = require('firebase-admin');
+const path = require('path');
+const { v4: uuidv4 } = require('uuid');
+
+// Load service account key with corrected path
+const serviceAccountPath = path.join(__dirname, '../../propagentic-firebase-adminsdk-fbsvc-c20b027723.json');
+const serviceAccount = require(serviceAccountPath);
+
+// Initialize the app with admin privileges
+admin.initializeApp({
+ credential: admin.credential.cert(serviceAccount),
+ databaseURL: "https://propagentic-default-rtdb.firebaseio.com"
+});
+
+const db = admin.firestore();
+
+/**
+ * Test script to execute the full invitation process
+ * 1. Create a landlord (if needed)
+ * 2. Create a property (if needed)
+ * 3. Create an invitation
+ * 4. Send an email
+ * 5. Accept the invitation (simulate tenant)
+ */
+async function testInviteProcess() {
+ try {
+ console.log('============================================');
+ console.log('TENANT INVITATION PROCESS TESTING SCRIPT');
+ console.log('============================================');
+
+ // Configuration - CHANGE THESE VALUES AS NEEDED
+ const testLandlordEmail = 'landlord@example.com';
+ const testTenantEmail = 'tenant@example.com'; // The email to receive the invitation
+ const testPropertyName = 'Test Property';
+ const testPropertyAddress = '123 Test Street, Testville';
+
+ // Step 1: Ensure a test landlord exists
+ console.log('\n1. Ensuring test landlord exists...');
+ const landlordId = await ensureTestLandlordExists(testLandlordEmail);
+ console.log(`✅ Using landlord with ID: ${landlordId}`);
+
+ // Step 2: Ensure a test property exists
+ console.log('\n2. Ensuring test property exists...');
+ const propertyId = await ensureTestPropertyExists(landlordId, testPropertyName, testPropertyAddress);
+ console.log(`✅ Using property with ID: ${propertyId}`);
+
+ // Step 3: Create an invitation
+ console.log('\n3. Creating tenant invitation...');
+ const inviteId = await createInvitation(landlordId, propertyId, testPropertyName, testTenantEmail);
+ console.log(`✅ Created invitation with ID: ${inviteId}`);
+
+ // Step 4: Send an email via the mail collection
+ console.log('\n4. Sending invitation email...');
+ await sendInvitationEmail(inviteId, landlordId, propertyId, testPropertyName, testTenantEmail);
+ console.log(`✅ Email document added to mail collection`);
+
+ // Step 5: Simulate tenant accepting the invitation
+ console.log('\n5. Simulating tenant accepting invitation...');
+ const tenantId = await simulateTenantAcceptingInvitation(inviteId, testTenantEmail);
+ console.log(`✅ Invitation accepted by tenant with ID: ${tenantId}`);
+
+ console.log('\n============================================');
+ console.log('🎉 TENANT INVITATION PROCESS COMPLETED SUCCESSFULLY');
+ console.log('============================================');
+
+ } catch (error) {
+ console.error('❌ Error in tenant invitation process:', error);
+ } finally {
+ // Explicitly exit when done since admin SDK keeps connections open
+ process.exit(0);
+ }
+}
+
+/**
+ * Ensure a test landlord exists in the database
+ */
+async function ensureTestLandlordExists(email) {
+ // Check if user with email already exists
+ const usersRef = db.collection('users');
+ const snapshot = await usersRef.where('email', '==', email).limit(1).get();
+
+ if (!snapshot.empty) {
+ return snapshot.docs[0].id;
+ }
+
+ // Create a new landlord user
+ const landlordId = uuidv4();
+ await db.collection('users').doc(landlordId).set({
+ email: email,
+ userType: 'landlord',
+ role: 'landlord',
+ displayName: 'Test Landlord',
+ firstName: 'Test',
+ lastName: 'Landlord',
+ createdAt: admin.firestore.FieldValue.serverTimestamp(),
+ onboardingComplete: true
+ });
+
+ console.log(`Created new landlord user: ${email}`);
+ return landlordId;
+}
+
+/**
+ * Ensure a test property exists in the database
+ */
+async function ensureTestPropertyExists(landlordId, propertyName, address) {
+ // Check if property already exists for this landlord
+ const propertiesRef = db.collection('properties');
+ const snapshot = await propertiesRef.where('landlordId', '==', landlordId)
+ .where('name', '==', propertyName)
+ .limit(1).get();
+
+ if (!snapshot.empty) {
+ return snapshot.docs[0].id;
+ }
+
+ // Create a new property
+ const propertyData = {
+ name: propertyName,
+ address: address,
+ landlordId: landlordId,
+ createdAt: admin.firestore.FieldValue.serverTimestamp(),
+ isOccupied: false,
+ numberOfUnits: 1
+ };
+
+ const propertyRef = await db.collection('properties').add(propertyData);
+ console.log(`Created new property: ${propertyName}`);
+ return propertyRef.id;
+}
+
+/**
+ * Create an invitation in the invites collection
+ */
+async function createInvitation(landlordId, propertyId, propertyName, tenantEmail) {
+ // Create expiration date (7 days from now)
+ const now = admin.firestore.Timestamp.now();
+ const expiresAt = new admin.firestore.Timestamp(
+ now.seconds + 7 * 24 * 60 * 60,
+ now.nanoseconds
+ );
+
+ // Create invitation document
+ const inviteData = {
+ tenantEmail: tenantEmail.toLowerCase(),
+ propertyId: propertyId,
+ landlordId: landlordId,
+ propertyName: propertyName,
+ landlordName: 'Test Landlord',
+ status: 'pending',
+ emailSentStatus: 'pending',
+ createdAt: admin.firestore.FieldValue.serverTimestamp(),
+ expiresAt: expiresAt
+ };
+
+ const inviteRef = await db.collection('invites').add(inviteData);
+ return inviteRef.id;
+}
+
+/**
+ * Send an invitation email by adding a document to the mail collection
+ */
+async function sendInvitationEmail(inviteId, landlordId, propertyId, propertyName, tenantEmail) {
+ // Create mail document as per the Firebase Extension format
+ const mailData = {
+ to: tenantEmail,
+ message: {
+ subject: 'You have been invited to PropAgentic',
+ text: `You have been invited to join ${propertyName} on PropAgentic. Click the link to accept: https://propagentic.com/invite/${inviteId}`,
+ html: `
+ <div style="font-family: Arial, sans-serif; color: #333; max-width: 600px; margin: 0 auto;">
+ <h2 style="color: #176B5D;">PropAgentic Invitation</h2>
+ <p>Hello,</p>
+ <p>You've been invited to join <strong>${propertyName}</strong> on PropAgentic.</p>
+ <p><a href="https://propagentic.com/invite/${inviteId}" style="display: inline-block; background-color: #176B5D; color: white; padding: 10px 20px; text-decoration: none; border-radius: 4px;">Accept Invitation</a></p>
+ <p>This invitation will expire in 7 days.</p>
+ <p>If you have any questions, please contact your property manager.</p>
+ </div>
+ `
+ }
+ };
+
+ // Add to mail collection
+ const mailRef = await db.collection('mail').add(mailData);
+
+ // Update the invite with emailSentStatus
+ await db.collection('invites').doc(inviteId).update({
+ emailSentStatus: 'sent',
+ updatedAt: admin.firestore.FieldValue.serverTimestamp()
+ });
+
+ return mailRef.id;
+}
+
+/**
+ * Simulate a tenant accepting the invitation
+ */
+async function simulateTenantAcceptingInvitation(inviteId, tenantEmail) {
+ // First, ensure tenant user exists
+ const tenantId = await ensureTenantUserExists(tenantEmail);
+
+ // Get the invitation
+ const inviteRef = db.collection('invites').doc(inviteId);
+ const invite = await inviteRef.get();
+
+ if (!invite.exists) {
+ throw new Error(`Invitation with ID ${inviteId} not found`);
+ }
+
+ const inviteData = invite.data();
+
+ // Update invitation status
+ await inviteRef.update({
+ status: 'accepted',
+ tenantId: tenantId,
+ acceptedAt: admin.firestore.FieldValue.serverTimestamp(),
+ updatedAt: admin.firestore.FieldValue.serverTimestamp()
+ });
+
+ // Update tenant user profile with property association
+ await db.collection('users').doc(tenantId).update({
+ propertyId: inviteData.propertyId,
+ landlordId: inviteData.landlordId,
+ joinDate: admin.firestore.FieldValue.serverTimestamp()
+ });
greptile
style: No transaction is used for this multi-document update, which could leave data in an inconsistent state if one of the updates fails.
suggested fix
// Update tenant user profile with property association
+ await db.runTransaction(async (transaction) => {
+ transaction.update(db.collection('users').doc(tenantId), {
propertyId: inviteData.propertyId,
landlordId: inviteData.landlordId,
joinDate: admin.firestore.FieldValue.serverTimestamp()
});
});
diff block
+import { BadRequestException, Injectable } from '@nestjs/common';
+
+import { Request } from 'express';
+import { isDefined } from 'twenty-shared/utils';
+
+import { RestApiBaseHandler } from 'src/engine/api/rest/core/interfaces/rest-api-base.handler';
+
+@Injectable()
+export class RestApiCreateManyHandler extends RestApiBaseHandler {
+ async handle(request: Request) {
+ const { objectMetadataNamePlural, objectMetadata, repository } =
+ await this.getRepositoryAndMetadataOrFail(request);
+
+ const body = request.body;
+
+ if (!Array.isArray(body)) {
+ throw new BadRequestException('Body must be an array');
+ }
+
+ if (body.length === 0) {
+ throw new BadRequestException('Input must not be empty');
+ }
+
+ const overriddenRecordsToCreate: Record<string, any>[] = [];
+
+ for (const recordToCreate of body) {
+ const overriddenBody = await this.recordInputTransformerService.process({
+ recordInput: recordToCreate,
+ objectMetadataMapItem: objectMetadata.objectMetadataMapItem,
+ });
+
+ const recordExists =
+ isDefined(overriddenBody.id) &&
+ (await repository.exists({
+ where: {
+ id: overriddenBody.id,
+ },
+ }));
+
+ if (recordExists) {
+ throw new BadRequestException('Record already exists');
+ }
+
+ overriddenRecordsToCreate.push(overriddenBody);
+ }
+
+ const createdRecords = await repository.save(overriddenRecordsToCreate);
greptile
style: The lack of a try-catch around repository.save() could leave the system in an inconsistent state if the save fails partially.
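A hedged sketch of the try-catch the comment asks for. Whether this repository exposes transactional semantics is an assumption left open, so the sketch only converts a partial failure into a descriptive error instead of an opaque one:

```typescript
let createdRecords;
try {
  createdRecords = await repository.save(overriddenRecordsToCreate);
} catch (error) {
  // Some rows may already have been written, so the message flags that
  // the create-many was not atomic rather than pretending nothing happened.
  throw new Error(
    `createMany for ${objectMetadataNamePlural} failed and may be partially applied: ${
      (error as Error).message
    }`,
  );
}
```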
diff block
async setUserWorkspaceRoleMap(
workspaceId: string,
userWorkspaceRoleMap: UserWorkspaceRoleMap,
- ): Promise<{
- newUserWorkspaceRoleMapVersion: string;
- }> {
- const [, newUserWorkspaceRoleMapVersion] = await Promise.all([
+ ): Promise<void> {
+ await Promise.all([
this.cacheStorageService.set<UserWorkspaceRoleMap>(
`${WorkspaceCacheKeys.MetadataPermissionsUserWorkspaceRoleMap}:${workspaceId}`,
userWorkspaceRoleMap,
TTL_INFINITE,
),
this.setUserWorkspaceRoleMapVersion(workspaceId),
]);
-
- return { newUserWorkspaceRoleMapVersion };
}
greptile
logic: Check that both promises resolve successfully before proceeding. Currently, a failure in setUserWorkspaceRoleMapVersion would leave the cached map and its version in an inconsistent state.
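One hedged way to enforce that ordering is to run the writes sequentially, so the version is only bumped after the map itself has been stored; this assumes "bump the version last" is the desired invariant:

```typescript
async setUserWorkspaceRoleMap(
  workspaceId: string,
  userWorkspaceRoleMap: UserWorkspaceRoleMap,
): Promise<void> {
  // Write the map first; if this throws, the version is never bumped,
  // so readers cannot observe a new version pointing at stale data.
  await this.cacheStorageService.set<UserWorkspaceRoleMap>(
    `${WorkspaceCacheKeys.MetadataPermissionsUserWorkspaceRoleMap}:${workspaceId}`,
    userWorkspaceRoleMap,
    TTL_INFINITE,
  );

  await this.setUserWorkspaceRoleMapVersion(workspaceId);
}
```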
diff block
+import time
+from datetime import datetime
+from datetime import timedelta
+from datetime import timezone
+from time import sleep
+from typing import Any
+from typing import cast
+from uuid import uuid4
+
+from celery import Celery
+from celery import shared_task
+from celery import Task
+from celery.exceptions import SoftTimeLimitExceeded
+from pydantic import ValidationError
+from redis import Redis
+from redis.exceptions import LockError
+from redis.lock import Lock as RedisLock
+from sqlalchemy.orm import Session
+
+from ee.onyx.configs.app_configs import DEFAULT_PERMISSION_DOC_SYNC_FREQUENCY
+from ee.onyx.db.connector_credential_pair import get_all_auto_sync_cc_pairs
+from ee.onyx.db.document import upsert_document_external_perms
+from ee.onyx.external_permissions.sync_params import DOC_PERMISSION_SYNC_PERIODS
+from ee.onyx.external_permissions.sync_params import DOC_PERMISSIONS_FUNC_MAP
+from ee.onyx.external_permissions.sync_params import (
+ DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION,
+)
+from onyx.access.models import DocExternalAccess
+from onyx.background.celery.apps.app_base import task_logger
+from onyx.background.celery.celery_redis import celery_find_task
+from onyx.background.celery.celery_redis import celery_get_queue_length
+from onyx.background.celery.celery_redis import celery_get_queued_task_ids
+from onyx.background.celery.celery_redis import celery_get_unacked_task_ids
+from onyx.background.celery.tasks.shared.tasks import OnyxCeleryTaskCompletionStatus
+from onyx.configs.app_configs import JOB_TIMEOUT
+from onyx.configs.constants import CELERY_GENERIC_BEAT_LOCK_TIMEOUT
+from onyx.configs.constants import CELERY_PERMISSIONS_SYNC_LOCK_TIMEOUT
+from onyx.configs.constants import CELERY_TASK_WAIT_FOR_FENCE_TIMEOUT
+from onyx.configs.constants import DANSWER_REDIS_FUNCTION_LOCK_PREFIX
+from onyx.configs.constants import DocumentSource
+from onyx.configs.constants import OnyxCeleryPriority
+from onyx.configs.constants import OnyxCeleryQueues
+from onyx.configs.constants import OnyxCeleryTask
+from onyx.configs.constants import OnyxRedisConstants
+from onyx.configs.constants import OnyxRedisLocks
+from onyx.configs.constants import OnyxRedisSignals
+from onyx.connectors.factory import validate_ccpair_for_user
+from onyx.db.connector import mark_cc_pair_as_permissions_synced
+from onyx.db.connector_credential_pair import get_connector_credential_pair_from_id
+from onyx.db.document import get_document_ids_for_connector_credential_pair
+from onyx.db.document import upsert_document_by_connector_credential_pair
+from onyx.db.engine import get_session_with_current_tenant
+from onyx.db.enums import AccessType
+from onyx.db.enums import ConnectorCredentialPairStatus
+from onyx.db.enums import SyncStatus
+from onyx.db.enums import SyncType
+from onyx.db.models import ConnectorCredentialPair
+from onyx.db.sync_record import insert_sync_record
+from onyx.db.sync_record import update_sync_record_status
+from onyx.db.users import batch_add_ext_perm_user_if_not_exists
+from onyx.indexing.indexing_heartbeat import IndexingHeartbeatInterface
+from onyx.redis.redis_connector import RedisConnector
+from onyx.redis.redis_connector_doc_perm_sync import RedisConnectorPermissionSync
+from onyx.redis.redis_connector_doc_perm_sync import RedisConnectorPermissionSyncPayload
+from onyx.redis.redis_pool import get_redis_client
+from onyx.redis.redis_pool import get_redis_replica_client
+from onyx.redis.redis_pool import redis_lock_dump
+from onyx.server.runtime.onyx_runtime import OnyxRuntime
+from onyx.server.utils import make_short_id
+from onyx.utils.logger import doc_permission_sync_ctx
+from onyx.utils.logger import format_error_for_logging
+from onyx.utils.logger import LoggerContextVars
+from onyx.utils.logger import setup_logger
+from onyx.utils.telemetry import optional_telemetry
+from onyx.utils.telemetry import RecordType
+
+
+logger = setup_logger()
+
+
+DOCUMENT_PERMISSIONS_UPDATE_MAX_RETRIES = 3
+
+
+# 5 seconds more than RetryDocumentIndex STOP_AFTER+MAX_WAIT
+LIGHT_SOFT_TIME_LIMIT = 105
+LIGHT_TIME_LIMIT = LIGHT_SOFT_TIME_LIMIT + 15
+
+
+"""Jobs / utils for kicking off doc permissions sync tasks."""
+
+
+def _is_external_doc_permissions_sync_due(cc_pair: ConnectorCredentialPair) -> bool:
+ """Returns boolean indicating if external doc permissions sync is due."""
+
+ if cc_pair.access_type != AccessType.SYNC:
+ return False
+
+ # skip doc permissions sync if not active
+ if cc_pair.status != ConnectorCredentialPairStatus.ACTIVE:
+ return False
+
+ # If the last sync is None, it has never been run so we run the sync
+ last_perm_sync = cc_pair.last_time_perm_sync
+ if last_perm_sync is None:
+ return True
+
+ source_sync_period = DOC_PERMISSION_SYNC_PERIODS.get(cc_pair.connector.source)
+
+ if not source_sync_period:
+ source_sync_period = DEFAULT_PERMISSION_DOC_SYNC_FREQUENCY
+
+ source_sync_period *= int(OnyxRuntime.get_doc_permission_sync_multiplier())
+
+ # If the last sync is greater than the full fetch period, we run the sync
+ next_sync = last_perm_sync + timedelta(seconds=source_sync_period)
+ if datetime.now(timezone.utc) >= next_sync:
+ return True
+
+ return False
+
+
+@shared_task(
+ name=OnyxCeleryTask.CHECK_FOR_DOC_PERMISSIONS_SYNC,
+ ignore_result=True,
+ soft_time_limit=JOB_TIMEOUT,
+ bind=True,
+)
+def check_for_doc_permissions_sync(self: Task, *, tenant_id: str) -> bool | None:
+ # TODO(rkuo): merge into check function after lookup table for fences is added
+
+ # we need to use celery's redis client to access its redis data
+ # (which lives on a different db number)
+ r = get_redis_client()
+ r_replica = get_redis_replica_client()
+ r_celery: Redis = self.app.broker_connection().channel().client # type: ignore
+
+ lock_beat: RedisLock = r.lock(
+ OnyxRedisLocks.CHECK_CONNECTOR_DOC_PERMISSIONS_SYNC_BEAT_LOCK,
+ timeout=CELERY_GENERIC_BEAT_LOCK_TIMEOUT,
+ )
+
+ # these tasks should never overlap
+ if not lock_beat.acquire(blocking=False):
+ return None
+
+ try:
+ # get all cc pairs that need to be synced
+ cc_pair_ids_to_sync: list[int] = []
+ with get_session_with_current_tenant() as db_session:
+ cc_pairs = get_all_auto_sync_cc_pairs(db_session)
+
+ for cc_pair in cc_pairs:
+ if _is_external_doc_permissions_sync_due(cc_pair):
+ cc_pair_ids_to_sync.append(cc_pair.id)
+
+ lock_beat.reacquire()
+ for cc_pair_id in cc_pair_ids_to_sync:
+ payload_id = try_creating_permissions_sync_task(
+ self.app, cc_pair_id, r, tenant_id
+ )
+ if not payload_id:
+ continue
+
+ task_logger.info(
+ f"Permissions sync queued: cc_pair={cc_pair_id} id={payload_id}"
+ )
+
+ # we want to run this less frequently than the overall task
+ lock_beat.reacquire()
+ if not r.exists(OnyxRedisSignals.BLOCK_VALIDATE_PERMISSION_SYNC_FENCES):
+ # clear any permission fences that don't have associated celery tasks in progress
+ # tasks can be in the queue in redis, in reserved tasks (prefetched by the worker),
+ # or be currently executing
+ try:
+ validate_permission_sync_fences(
+ tenant_id, r, r_replica, r_celery, lock_beat
+ )
+ except Exception:
+ task_logger.exception(
+ "Exception while validating permission sync fences"
+ )
+
+ r.set(OnyxRedisSignals.BLOCK_VALIDATE_PERMISSION_SYNC_FENCES, 1, ex=300)
+
+ # use a lookup table to find active fences. We still have to verify the fence
+ # exists since it is an optimization and not the source of truth.
+ lock_beat.reacquire()
+ keys = cast(set[Any], r_replica.smembers(OnyxRedisConstants.ACTIVE_FENCES))
+ for key in keys:
+ key_bytes = cast(bytes, key)
+
+ if not r.exists(key_bytes):
+ r.srem(OnyxRedisConstants.ACTIVE_FENCES, key_bytes)
+ continue
+
+ key_str = key_bytes.decode("utf-8")
+ if key_str.startswith(RedisConnectorPermissionSync.FENCE_PREFIX):
+ with get_session_with_current_tenant() as db_session:
+ monitor_ccpair_permissions_taskset(
+ tenant_id, key_bytes, r, db_session
+ )
+ task_logger.info(f"check_for_doc_permissions_sync finished: tenant={tenant_id}")
+ except SoftTimeLimitExceeded:
+ task_logger.info(
+ "Soft time limit exceeded, task is being terminated gracefully."
+ )
+ except Exception as e:
+ error_msg = format_error_for_logging(e)
+ task_logger.warning(
+ f"Unexpected check_for_doc_permissions_sync exception: tenant={tenant_id} {error_msg}"
+ )
+ task_logger.exception(
+ f"Unexpected check_for_doc_permissions_sync exception: tenant={tenant_id}"
+ )
+ finally:
+ if lock_beat.owned():
+ lock_beat.release()
+
+ return True
+
+
+def try_creating_permissions_sync_task(
+ app: Celery,
+ cc_pair_id: int,
+ r: Redis,
+ tenant_id: str,
+) -> str | None:
+ """Returns a randomized payload id on success.
+ Returns None if no syncing is required."""
+ LOCK_TIMEOUT = 30
+
+ payload_id: str | None = None
+
+ redis_connector = RedisConnector(tenant_id, cc_pair_id)
+
+ lock: RedisLock = r.lock(
+ DANSWER_REDIS_FUNCTION_LOCK_PREFIX + "try_generate_permissions_sync_tasks",
+ timeout=LOCK_TIMEOUT,
+ )
+
+ acquired = lock.acquire(blocking_timeout=LOCK_TIMEOUT / 2)
+ if not acquired:
+ return None
+
+ try:
+ if redis_connector.permissions.fenced:
+ return None
+
+ if redis_connector.delete.fenced:
+ return None
+
+ if redis_connector.prune.fenced:
+ return None
+
+ redis_connector.permissions.generator_clear()
+ redis_connector.permissions.taskset_clear()
+
+ custom_task_id = f"{redis_connector.permissions.generator_task_key}_{uuid4()}"
+
+ # create before setting fence to avoid race condition where the monitoring
+ # task updates the sync record before it is created
+ try:
+ with get_session_with_current_tenant() as db_session:
+ insert_sync_record(
+ db_session=db_session,
+ entity_id=cc_pair_id,
+ sync_type=SyncType.EXTERNAL_PERMISSIONS,
+ )
+ except Exception:
+ task_logger.exception("insert_sync_record exceptioned.")
+
greptile
logic: The exception is caught and logged but execution continues, potentially leaving the system in an inconsistent state if sync record creation fails.
suggested fix
except Exception:
task_logger.exception("insert_sync_record exceptioned.")
+ raise # Re-raise to abort the sync process since sync record is critical
diff block
+import { and, eq } from 'drizzle-orm'
+import { NextRequest, NextResponse } from 'next/server'
+import { randomUUID } from 'crypto'
+import { getSession } from '@/lib/auth'
+import { db } from '@/db'
+import { workspace, workspaceMember, workspaceInvitation, user } from '@/db/schema'
+
+// GET /api/workspaces/invitations/accept - Accept an invitation via token
+export async function GET(req: NextRequest) {
+ const token = req.nextUrl.searchParams.get('token')
+
+ if (!token) {
+ // Redirect to a page explaining the error
+ return NextResponse.redirect(new URL('/invitation-error?reason=missing-token', process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ const session = await getSession()
+
+ if (!session?.user?.id) {
+ // Store the token in a query param and redirect to login page
+ return NextResponse.redirect(new URL(`/auth/signin?callbackUrl=${encodeURIComponent(`/api/workspaces/invitations/accept?token=${token}`)}`, process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ try {
+ // Find the invitation by token
+ const invitation = await db
+ .select()
+ .from(workspaceInvitation)
+ .where(eq(workspaceInvitation.token, token))
+ .then(rows => rows[0])
+
+ if (!invitation) {
+ return NextResponse.redirect(new URL('/invitation-error?reason=invalid-token', process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ // Check if invitation has expired
+ if (new Date() > new Date(invitation.expiresAt)) {
+ return NextResponse.redirect(new URL('/invitation-error?reason=expired', process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ // Check if invitation is already accepted
+ if (invitation.status !== 'pending') {
+ return NextResponse.redirect(new URL('/invitation-error?reason=already-processed', process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ // Check if invitation email matches the logged-in user
+ if (invitation.email.toLowerCase() !== session.user.email.toLowerCase()) {
+ return NextResponse.redirect(new URL('/invitation-error?reason=email-mismatch', process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ // Get the workspace details
+ const workspaceDetails = await db
+ .select()
+ .from(workspace)
+ .where(eq(workspace.id, invitation.workspaceId))
+ .then(rows => rows[0])
+
+ if (!workspaceDetails) {
+ return NextResponse.redirect(new URL('/invitation-error?reason=workspace-not-found', process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ // Check if user is already a member
+ const existingMembership = await db
+ .select()
+ .from(workspaceMember)
+ .where(
+ and(
+ eq(workspaceMember.workspaceId, invitation.workspaceId),
+ eq(workspaceMember.userId, session.user.id)
+ )
+ )
+ .then(rows => rows[0])
+
+ if (existingMembership) {
+ // User is already a member, just mark the invitation as accepted and redirect
+ await db
+ .update(workspaceInvitation)
+ .set({
+ status: 'accepted',
+ updatedAt: new Date(),
+ })
+ .where(eq(workspaceInvitation.id, invitation.id))
+
+ return NextResponse.redirect(new URL(`/w/${invitation.workspaceId}`, process.env.NEXT_PUBLIC_APP_URL || 'https://simstudio.ai'))
+ }
+
+ // Add user to workspace
+ await db
+ .insert(workspaceMember)
+ .values({
+ id: randomUUID(),
+ workspaceId: invitation.workspaceId,
+ userId: session.user.id,
+ role: invitation.role,
+ joinedAt: new Date(),
+ updatedAt: new Date(),
+ })
+
+ // Mark invitation as accepted
+ await db
+ .update(workspaceInvitation)
+ .set({
+ status: 'accepted',
+ updatedAt: new Date(),
+ })
+ .where(eq(workspaceInvitation.id, invitation.id))
greptile
logic: These database operations should be wrapped in a transaction to ensure atomicity. If the second operation fails, we could end up with an inconsistent state.
suggested fix
+ // Add user to workspace and mark invitation as accepted atomically
+ await db.transaction(async (tx) => {
+ await tx
.insert(workspaceMember)
.values({
id: randomUUID(),
workspaceId: invitation.workspaceId,
userId: session.user.id,
role: invitation.role,
joinedAt: new Date(),
updatedAt: new Date(),
})
+ await tx
.update(workspaceInvitation)
.set({
status: 'accepted',
updatedAt: new Date(),
})
.where(eq(workspaceInvitation.id, invitation.id))
})
diff block
+import { ServiceCenterFieldActionDropdown } from '@/settings/service-center/sectors/components/ServiceCenterFieldActionDropdown';
+import { ServiceCenterSectorTableRow } from '@/settings/service-center/sectors/components/ServiceCenterSectorTableRow';
+import { useDeleteSector } from '@/settings/service-center/sectors/hooks/useDeleteSector';
+import { Sector } from '@/settings/service-center/sectors/types/Sector';
+import { SettingsPath } from '@/types/SettingsPath';
+import { useIsMobile } from '@/ui/utilities/responsive/hooks/useIsMobile';
+import styled from '@emotion/styled';
+import { useNavigate } from 'react-router-dom';
+import { Section } from 'twenty-ui';
+import { getSettingsPath } from '~/utils/navigation/getSettingsPath';
+
+const StyledShowServiceCenterTabs = styled.div<{ isMobile: boolean }>`
+ display: flex;
+ flex: 1 0 0;
+ flex-direction: column;
+ justify-content: start;
+ width: 100%;
+`;
+
+const StyledSection = styled(Section)`
+ background: ${({ theme }) => theme.background.secondary};
+ border: 1px solid ${({ theme }) => theme.border.color.medium};
+ border-radius: ${({ theme }) => theme.spacing(1)};
+ margin-top: ${({ theme }) => theme.spacing(4)};
+`;
+
+export const TAB_LIST_COMPONENT_ID = 'show-page-right-tab-list';
+
+type ServiceCenterSectorsProps = {
+ sectors: Sector[];
+ refetchSectors: () => void;
+ isRightDrawer?: boolean;
+ loading?: boolean;
+};
+
+export const ServiceCenterSectors = ({
+ sectors,
+ refetchSectors,
+ loading,
+ isRightDrawer = false,
+}: ServiceCenterSectorsProps) => {
+ // const { t } = useTranslation();
+ const navigate = useNavigate();
+ const isMobile = useIsMobile() || isRightDrawer;
+
+ const { deleteSectorById } = useDeleteSector();
+
+ const handleEditSector = (sectorName: string) => {
+ const path = getSettingsPath(SettingsPath.ServiceCenterEditSector).replace(
+ ':sectorSlug',
+ sectorName,
+ );
+
+ navigate(path);
+ };
+
+ return (
+ <StyledShowServiceCenterTabs isMobile={isMobile}>
+ {sectors?.length > 0 && (
+ <StyledSection>
+ {sectors.map((sector) => (
+ <ServiceCenterSectorTableRow
+ key={sector.id}
+ sectorName={sector.name}
+ sectorIcon={sector.icon}
+ accessory={
+ <ServiceCenterFieldActionDropdown
+ modalMessage={{
+ title: 'Delete sector',
+ subtitle: 'This will permanently delete this sector.',
+ }}
+ scopeKey={sector.name}
+ onEdit={() => {
+ handleEditSector(sector.name);
+ }}
+ onDelete={async () => {
+ await deleteSectorById(sector.id);
+ refetchSectors();
+ }}
greptile
style: Consider handling potential deletion errors to prevent the UI from getting into an inconsistent state.
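A minimal sketch of guarding the delete callback so a failed mutation is surfaced and the list still refetches to reflect the server's actual state; the logging shown is illustrative:

```typescript
onDelete={async () => {
  try {
    await deleteSectorById(sector.id);
  } catch (error) {
    // Without this, a failed delete is silently swallowed and the row can
    // appear removed (or stuck) depending on optimistic updates elsewhere.
    console.error('Failed to delete sector', error);
  } finally {
    // Refetch either way so the UI reflects what the server actually has.
    refetchSectors();
  }
}}
```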
diff block
+import { LocalStorage } from "@raycast/api";
+
+/**
+ * Type for values that can be stored in LocalStorage
+ * LocalStorage can store strings, numbers, booleans, and objects that can be serialized to JSON
+ */
+export type StorageValue = string | number | boolean | object;
+
+/**
+ * Gets an item from LocalStorage with error handling
+ * @param key The key to retrieve
+ * @param defaultValue Optional default value if the key doesn't exist or an error occurs
+ * @returns The stored value or the default value
+ */
+export async function getStorageItem<T>(key: string, defaultValue?: T): Promise<T | undefined> {
+ try {
+ const value = await LocalStorage.getItem(key);
+ if (value === undefined) {
+ return defaultValue;
+ }
+ return typeof value === "string" ? (JSON.parse(value) as T) : (value as unknown as T);
+ } catch (error) {
+ console.error(`Failed to get item from storage: ${key}`, error);
+ return defaultValue;
+ }
+}
+
+/**
+ * Sets an item in LocalStorage with error handling
+ * @param key The key to set
+ * @param value The value to store
+ * @returns True if successful, false if an error occurred
+ */
+export async function setStorageItem(key: string, value: StorageValue): Promise<boolean> {
+ try {
+ const valueToStore = typeof value === "object" ? JSON.stringify(value) : value;
+ await LocalStorage.setItem(key, valueToStore);
+ return true;
+ } catch (error) {
+ console.error(`Failed to save item to storage: ${key}`, error);
+ return false;
+ }
+}
+
+/**
+ * Removes an item from LocalStorage with error handling
+ * @param key The key to remove
+ * @returns True if successful, false if an error occurred
+ */
+export async function removeStorageItem(key: string): Promise<boolean> {
+ try {
+ await LocalStorage.removeItem(key);
+ return true;
+ } catch (error) {
+ console.error(`Failed to remove item from storage: ${key}`, error);
+ return false;
+ }
+}
+
+/**
+ * Saves multiple items to LocalStorage in a batch with error handling
+ * @param items Object with keys and values to store
+ * @returns True if all items were saved successfully, false if any errors occurred
+ */
+export async function batchSetStorageItems(items: Record<string, StorageValue>): Promise<boolean> {
+ try {
+ await Promise.all(
+ Object.entries(items).map(([key, value]) => {
+ const valueToStore = typeof value === "object" ? JSON.stringify(value) : value;
+ return LocalStorage.setItem(key, valueToStore);
+ })
+ );
greptile
style: Promise.all rejects on the first error, potentially leaving storage in an inconsistent state. Consider using Promise.allSettled for more robust error handling.
suggested fix
+ const results = await Promise.allSettled(
Object.entries(items).map(([key, value]) => {
const valueToStore = typeof value === "object" ? JSON.stringify(value) : value;
return LocalStorage.setItem(key, valueToStore);
})
);
+ return results.every(result => result.status === 'fulfilled');
diff block
state: {
blocks: initialState.blocks,
edges: initialState.edges,
+ loopBlocks: initialState.loopBlocks,
loops: initialState.loops,
},
greptile
logic: Inconsistent state structure: `loops` is still included alongside `loopBlocks` without a clear distinction between their roles.
diff block
overlap = 200,
lengthFunction = (text: string) => text.length,
keepSeparator = false,
+ separatorPosition,
addStartIndex = false,
stripWhitespace = true,
- }: ChunkOptions) {
+ }: BaseChunkOptions) {
if (overlap > size) {
throw new Error(`Got a larger chunk overlap (${overlap}) than chunk size ` + `(${size}), should be smaller.`);
}
+ if (keepSeparator !== undefined && keepSeparator !== false) {
+ // Runtime warning for deprecated usage
+ console.warn(
+ '[DEPRECATION] `keepSeparator` is deprecated and will be removed after May 20th, 2025. Use `separatorPosition` instead.',
+ );
+ if (keepSeparator === 'end') separatorPosition = 'end';
+ else if (keepSeparator === 'start') separatorPosition = 'start';
+ else if (keepSeparator === true) separatorPosition = 'start';
+ }
this.size = size;
this.overlap = overlap;
this.lengthFunction = lengthFunction;
this.keepSeparator = keepSeparator;
+ this.separatorPosition = separatorPosition;
greptile
logic: keepSeparator is still being set even though it's deprecated, which could lead to an inconsistent state between keepSeparator and separatorPosition.
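A hedged sketch of treating separatorPosition as the single source of truth: the deprecated flag is normalized into it and never stored separately (dropping the this.keepSeparator assignment assumes nothing else still reads that field):

```typescript
// Normalize the deprecated option into the new one, then store only the new one.
if (keepSeparator !== undefined && keepSeparator !== false) {
  console.warn(
    '[DEPRECATION] `keepSeparator` is deprecated and will be removed after May 20th, 2025. Use `separatorPosition` instead.',
  );
  separatorPosition = keepSeparator === 'end' ? 'end' : 'start'; // true and 'start' both map to 'start'
}

this.separatorPosition = separatorPosition;
// this.keepSeparator is intentionally not set, so the two options cannot disagree.
```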
diff block
+// Mock data for the Earn feature
+
+type EarnState = {
+ enabled: boolean; // module installed?
+ allocation: number; // 0-100 %
+ totalBalance: bigint; // wei - represented as string for JSON transport
+ earningBalance: bigint; // wei - represented as string for JSON transport
+ apy: number; // e.g., 5.4 for 5.4%
+ lastSweep: string | null; // ISO date string
+ events: SweepEvent[];
+ configHash?: string; // The config hash used to enable the module
+};
+
+type SweepEvent = {
+ id: string;
+ timestamp: string; // ISO date string
+ amount: string; // wei
+ currency: string; // e.g., "USDC"
+ apyAtTime: number; // e.g., 5.4
+ status: 'success' | 'pending' | 'failed';
+ txHash?: string; // Optional transaction hash
+ failureReason?: string;
+};
+
+const MOCK_EARN_STATE_DISABLED: EarnState = {
+ enabled: false,
+ allocation: 30, // Default allocation
+ totalBalance: '10000000000', // 10,000 USDC (assuming 6 decimals for example)
+ earningBalance: '0',
+ apy: 5.4, // Example APY
+ lastSweep: null,
+ events: [],
+};
+
+const MOCK_EARN_STATE_ENABLED: EarnState = {
+ enabled: true,
+ allocation: 30,
+ totalBalance: '10000000000', // 10,000 USDC
+ earningBalance: '3000000000', // 3,000 USDC (30% of total)
+ apy: 5.4,
+ lastSweep: new Date(Date.now() - 3600 * 1000 * 2).toISOString(), // 2 hours ago
+ configHash: '0x123abc',
+ events: [
+ {
+ id: 'sweep1',
+ timestamp: new Date(Date.now() - 3600 * 1000 * 2).toISOString(),
+ amount: '1050000000', // 1050 USDC
+ currency: 'USDC',
+ apyAtTime: 5.2,
+ status: 'success',
+ txHash: '0xabc123...',
+ },
+ {
+ id: 'sweep2',
+ timestamp: new Date(Date.now() - 3600 * 1000 * 24 * 3).toISOString(), // 3 days ago
+ amount: '500000000', // 500 USDC
+ currency: 'USDC',
+ apyAtTime: 5.1,
+ status: 'success',
+ txHash: '0xdef456...',
+ },
+ {
+ id: 'sweep3',
+ timestamp: new Date(Date.now() - 3600 * 1000 * 24 * 1).toISOString(), // 1 day ago
+ amount: '200000000', // 200 USDC
+ currency: 'USDC',
+ apyAtTime: 5.3,
+ status: 'failed',
+ failureReason: 'Insufficient gas for relay.',
+ },
+ ],
+};
+
+// Store current state in memory for mocks
+let currentEarnState: EarnState = MOCK_EARN_STATE_DISABLED;
+
+// Functions to manipulate mock state (would be replaced by actual API calls)
+function MOCK_ENABLE_MODULE(configHash: string): Promise<void> {
+ console.log(`MOCK: Enabling earn module for safe with configHash: ${configHash}`);
+ currentEarnState = { ...MOCK_EARN_STATE_ENABLED, allocation: currentEarnState.allocation, configHash };
+ return Promise.resolve();
+}
+
+function MOCK_DISABLE_MODULE(): Promise<void> {
+ console.log('MOCK: Disabling earn module');
+ currentEarnState = { ...MOCK_EARN_STATE_DISABLED, allocation: currentEarnState.allocation };
+ return Promise.resolve();
+}
+
+function MOCK_SET_ALLOCATION(percentage: number): Promise<void> {
+ console.log(`MOCK: Setting allocation to ${percentage}%`);
+ if (percentage < 0 || percentage > 100) {
+ return Promise.reject(new Error("Allocation must be between 0 and 100"));
+ }
+ currentEarnState.allocation = percentage;
+ // Simulate recalculation of earningBalance if enabled
+ if (currentEarnState.enabled) {
+ const total = BigInt(currentEarnState.totalBalance);
+ currentEarnState.earningBalance = ((total * BigInt(percentage)) / 100n).toString();
+ }
+ return Promise.resolve();
+}
+
+function MOCK_GET_EARN_STATE(): Promise<EarnState> {
+ console.log('MOCK: Getting earn state', currentEarnState);
+ // Simulate APY fluctuation
+ currentEarnState.apy = parseFloat((5.0 + Math.random() * 0.5).toFixed(2));
+ return Promise.resolve(currentEarnState);
+}
+
+function MOCK_ADD_SWEEP_EVENT(event: Omit<SweepEvent, 'id' | 'timestamp' | 'apyAtTime'>): Promise<SweepEvent> {
+ console.log('MOCK: Adding sweep event', event);
+ const newEvent: SweepEvent = {
+ ...event,
+ id: `sweep${currentEarnState.events.length + 1}`,
+ timestamp: new Date().toISOString(),
+ apyAtTime: currentEarnState.apy,
+ };
+ currentEarnState.events.unshift(newEvent); // Add to the beginning of the list
+ if (event.status === 'success') {
+ currentEarnState.earningBalance = (BigInt(currentEarnState.earningBalance) + BigInt(event.amount)).toString();
+ currentEarnState.lastSweep = newEvent.timestamp;
greptile
logic: Success events modify earningBalance but not totalBalance. This creates inconsistent state where totalBalance doesn't reflect all transactions.
suggested fix
currentEarnState.events.unshift(newEvent); // Add to the beginning of the list
if (event.status === 'success') {
+ currentEarnState.totalBalance = (BigInt(currentEarnState.totalBalance) + BigInt(event.amount)).toString();
currentEarnState.earningBalance = (BigInt(currentEarnState.earningBalance) + BigInt(event.amount)).toString();
currentEarnState.lastSweep = newEvent.timestamp;
diff block
+import { Knex } from "knex";
+
+import { TableName } from "../schemas";
+
+export async function up(knex: Knex): Promise<void> {
+ const hasGatewayIdColumn = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "gatewayId");
+
+ if (!hasGatewayIdColumn) {
+ await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (table) => {
+ table.uuid("gatewayId").nullable();
+ table.foreign("gatewayId").references("id").inTable(TableName.Gateway).onDelete("SET NULL");
greptile
logic: SET NULL on delete could leave kubernetes auth entries in an inconsistent state if they require a gateway to function. Consider if CASCADE or RESTRICT would be more appropriate
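For comparison, a sketch of the stricter `RESTRICT` variant the comment hints at, reusing the same table and column names as the migration above (whether `RESTRICT` is actually the right policy depends on whether these auth entries can function without a gateway):

```typescript
import { Knex } from "knex";

import { TableName } from "../schemas";

export async function up(knex: Knex): Promise<void> {
  const hasGatewayIdColumn = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "gatewayId");

  if (!hasGatewayIdColumn) {
    await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (table) => {
      table.uuid("gatewayId").nullable();
      // RESTRICT blocks deleting a gateway while auth entries still reference it,
      // so a row can never silently lose the gateway it depends on.
      table.foreign("gatewayId").references("id").inTable(TableName.Gateway).onDelete("RESTRICT");
    });
  }
}
```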
diff block
return nil
}
+
+func handleNetworkDoesNotExist(ctx context.Context,
+ networkName string,
+ c *Client,
+ p *tea.Program,
+ errChan chan error,
+) {
+ _, err := c.client.NetworkCreate(ctx, networkName, network.CreateOptions{
+ Driver: "bridge",
+ })
+ if err != nil {
+ p.Send(multispinner.ProcessState{
+ Icon: style.CrossIcon.Render(),
+ Type: "network",
+ Name: networkName,
+ State: "creating",
+ Detail: err.Error(),
+ Done: true,
+ })
+ errChan <- eris.Wrapf(err, "Failed to create network %s", networkName)
+ }
greptile
logic: Function continues execution after sending error, which could lead to inconsistent state. Should return after sending error.
suggested fix
if err != nil {
p.Send(multispinner.ProcessState{
Icon: style.CrossIcon.Render(),
Type: "network",
Name: networkName,
State: "creating",
Detail: err.Error(),
Done: true,
})
errChan <- eris.Wrapf(err, "Failed to create network %s", networkName)
+ return
}
diff block
+use std::{fmt, sync::Mutex};
+
+use log::*;
+use magicblock_rpc_client::{
+ MagicBlockRpcClientError, MagicBlockSendTransactionConfig,
+ MagicblockRpcClient,
+};
+use solana_pubkey::Pubkey;
+use solana_sdk::{
+ address_lookup_table as alt,
+ address_lookup_table::state::{
+ LookupTableMeta, LOOKUP_TABLE_MAX_ADDRESSES,
+ },
+ clock::Slot,
+ commitment_config::CommitmentLevel,
+ signature::{Keypair, Signature},
+ signer::Signer,
+ slot_hashes::MAX_ENTRIES,
+ transaction::Transaction,
+};
+
+use crate::{
+ derive_keypair,
+ error::{TableManiaError, TableManiaResult},
+};
+
+/// Determined via trial and error. The keys themselves take up
+/// 27 * 32 bytes = 864 bytes.
+pub const MAX_ENTRIES_AS_PART_OF_EXTEND: u64 = 27;
+
+#[derive(Debug)]
+pub enum LookupTable {
+ Active {
+ derived_auth: Keypair,
+ table_address: Pubkey,
+ pubkeys: Mutex<Vec<Pubkey>>,
+ creation_slot: u64,
+ creation_sub_slot: u64,
+ init_signature: Signature,
+ extend_signatures: Vec<Signature>,
+ },
+ Deactivated {
+ derived_auth: Keypair,
+ table_address: Pubkey,
+ deactivation_slot: u64,
+ deactivate_signature: Signature,
+ },
+}
+
+impl fmt::Display for LookupTable {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Active {
+ derived_auth,
+ table_address,
+ pubkeys,
+ creation_slot,
+ creation_sub_slot,
+ init_signature,
+ extend_signatures,
+ } => {
+ let comma_separated_pubkeys = pubkeys
+ .lock()
+ .expect("pubkeys mutex poisoned")
+ .iter()
+ .map(|x| x.to_string())
+ .collect::<Vec<_>>()
+ .join(", ");
+ let comma_separated_sigs = extend_signatures
+ .iter()
+ .map(|x| x.to_string())
+ .collect::<Vec<_>>()
+ .join(", ");
+ write!(
+ f,
+ "LookupTable: Active {{
+ derived_auth: {}
+ table_address: {}
+ pubkeys: {}
+ creation_slot: {}
+ creation_sub_slot: {}
+ init_signature: {}
+ extend_signatures: {}
+}}",
+ derived_auth.pubkey(),
+ table_address,
+ comma_separated_pubkeys,
+ creation_slot,
+ creation_sub_slot,
+ init_signature,
+ comma_separated_sigs
+ )
+ }
+ Self::Deactivated {
+ derived_auth,
+ table_address,
+ deactivation_slot,
+ deactivate_signature,
+ } => {
+ write!(
+ f,
+ "LookupTable: Deactivated {{ derived_auth: {}, table_address: {}, deactivation_slot: {}, deactivate_signature: {} }}",
+ derived_auth.pubkey(),
+ table_address,
+ deactivation_slot,
+ deactivate_signature,
+ )
+ }
+ }
+ }
+}
+
+impl LookupTable {
+ pub fn derived_auth(&self) -> &Keypair {
+ match self {
+ Self::Active { derived_auth, .. } => derived_auth,
+ Self::Deactivated { derived_auth, .. } => derived_auth,
+ }
+ }
+ pub fn table_address(&self) -> &Pubkey {
+ match self {
+ Self::Active { table_address, .. } => table_address,
+ Self::Deactivated { table_address, .. } => table_address,
+ }
+ }
+
+ /// All pubkeys requested, regardless of the `reqid`.
+ /// The same pubkey might be included twice if requested with different `reqid`.
+ pub fn pubkeys(&self) -> Option<Vec<Pubkey>> {
+ match self {
+ Self::Active { pubkeys, .. } => {
+ Some(pubkeys.lock().expect("pubkeys mutex poisoned").to_vec())
+ }
+ Self::Deactivated { .. } => None,
+ }
+ }
+
+ pub fn creation_slot(&self) -> Option<u64> {
+ match self {
+ Self::Active { creation_slot, .. } => Some(*creation_slot),
+ Self::Deactivated { .. } => None,
+ }
+ }
+
+ pub fn has_more_capacity(&self) -> bool {
+ self.pubkeys()
+ .is_some_and(|x| x.len() < LOOKUP_TABLE_MAX_ADDRESSES)
+ }
+
+ pub fn contains(&self, pubkey: &Pubkey, _reqid: u64) -> bool {
+ match self {
+ Self::Active { pubkeys, .. } => pubkeys
+ .lock()
+ .expect("pubkeys mutex poisoned")
+ .contains(pubkey),
+ Self::Deactivated { .. } => false,
+ }
+ }
+
+ /// Returns `true` if we requested to deactivate this table.
+ /// NOTE: this doesn't mean that the deactivation period has passed; the
+ /// table could still be considered _deactivating_ on chain.
+ pub fn deactivate_triggered(&self) -> bool {
+ use LookupTable::*;
+ matches!(self, Deactivated { .. })
+ }
+
+ pub fn is_active(&self) -> bool {
+ use LookupTable::*;
+ matches!(self, Active { .. })
+ }
+
+ pub fn derive_keypair(
+ authority: &Keypair,
+ slot: Slot,
+ sub_slot: Slot,
+ ) -> Keypair {
+ derive_keypair::derive_keypair(authority, slot, sub_slot)
+ }
+
+ /// Initializes an address lookup table deriving its authority from the provided
+ /// [authority] keypair. The table is extended with the provided [pubkeys].
+ /// The [authority] keypair pays for the transaction.
+ ///
+ /// - **rpc_client**: RPC client to use for sending transactions
+ /// - **authority**: Keypair to derive the authority of the lookup table
+ /// - **latest_slot**: the on chain slot at which we are creating the table
+ /// - **sub_slot**: a bump to allow creating multiple lookup tables with the same authority
+ /// at the same slot
+ /// - **pubkeys**: to extend the lookup table, respecting
+ /// [solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES]
+ /// after it is initialized
+ /// - **reqid**: id of the request adding the pubkeys
+ pub async fn init(
+ rpc_client: &MagicblockRpcClient,
+ authority: &Keypair,
+ latest_slot: Slot,
+ sub_slot: Slot,
+ pubkeys: &[Pubkey],
+ _reqid: u64,
+ ) -> TableManiaResult<Self> {
+ check_max_pubkeys(pubkeys)?;
+
+ let derived_auth =
+ Self::derive_keypair(authority, latest_slot, sub_slot);
+
+ let (create_ix, table_address) = alt::instruction::create_lookup_table(
+ derived_auth.pubkey(),
+ authority.pubkey(),
+ latest_slot,
+ );
+
+ let end = pubkeys.len().min(LOOKUP_TABLE_MAX_ADDRESSES);
+ let extend_ix = alt::instruction::extend_lookup_table(
+ table_address,
+ derived_auth.pubkey(),
+ Some(authority.pubkey()),
+ pubkeys[..end].to_vec(),
+ );
+
+ let ixs = vec![create_ix, extend_ix];
+ let latest_blockhash = rpc_client.get_latest_blockhash().await?;
+ let tx = Transaction::new_signed_with_payer(
+ &ixs,
+ Some(&authority.pubkey()),
+ &[authority, &derived_auth],
+ latest_blockhash,
+ );
+
+ let outcome = rpc_client
+ .send_transaction(&tx, &Self::get_commitment(rpc_client))
+ .await?;
+ let (signature, error) = outcome.into_signature_and_error();
+ if let Some(error) = &error {
+ error!(
+ "Error initializing lookup table: {:?} ({})",
+ error, signature
+ );
+ return Err(MagicBlockRpcClientError::SentTransactionError(
+ error.clone(),
+ signature,
+ )
+ .into());
+ }
+
+ Ok(Self::Active {
+ derived_auth,
+ table_address,
+ pubkeys: Mutex::new(pubkeys.to_vec()),
+ creation_slot: latest_slot,
+ creation_sub_slot: sub_slot,
+ init_signature: signature,
+ extend_signatures: vec![],
+ })
+ }
+
+ fn get_commitment(
+ rpc_client: &MagicblockRpcClient,
+ ) -> MagicBlockSendTransactionConfig {
+ use CommitmentLevel::*;
+ match rpc_client.commitment_level() {
+ Processed => MagicBlockSendTransactionConfig::ensure_processed(),
+ Confirmed | Finalized => {
+ MagicBlockSendTransactionConfig::ensure_committed()
+ }
+ }
+ }
+
+ /// Extends this lookup table with the provided [pubkeys].
+ /// The transaction is signed with the [Self::derived_auth].
+ ///
+ /// - **rpc_client**: RPC client to use for sending the extend transaction
+ /// - **authority**: payer for the extend transaction
+ /// - **pubkeys**: to extend the lookup table with
+ /// - **reqid**: id of the request adding the pubkeys
+ pub async fn extend(
+ &self,
+ rpc_client: &MagicblockRpcClient,
+ authority: &Keypair,
+ extra_pubkeys: &[Pubkey],
+ _reqid: u64,
+ ) -> TableManiaResult<()> {
+ use LookupTable::*;
+
+ check_max_pubkeys(extra_pubkeys)?;
+
+ let pubkeys = match self {
+ Active { pubkeys, .. } => pubkeys,
+ Deactivated { .. } => {
+ return Err(TableManiaError::CannotExtendDeactivatedTable(
+ *self.table_address(),
+ ));
+ }
+ };
+ let extend_ix = alt::instruction::extend_lookup_table(
+ *self.table_address(),
+ self.derived_auth().pubkey(),
+ Some(authority.pubkey()),
+ extra_pubkeys.to_vec(),
+ );
+
+ let ixs = vec![extend_ix];
+ let latest_blockhash = rpc_client.get_latest_blockhash().await?;
+ let tx = Transaction::new_signed_with_payer(
+ &ixs,
+ Some(&authority.pubkey()),
+ &[authority, self.derived_auth()],
+ latest_blockhash,
+ );
+
+ let outcome = rpc_client
+ .send_transaction(&tx, &Self::get_commitment(rpc_client))
+ .await?;
+ let (signature, error) = outcome.into_signature_and_error();
+ if let Some(error) = &error {
+ error!("Error extending lookup table: {:?} ({})", error, signature);
+ return Err(MagicBlockRpcClientError::SentTransactionError(
+ error.clone(),
+ signature,
+ )
+ .into());
+ } else {
+ pubkeys
+ .lock()
+ .expect("pubkeys mutex poisoned")
+ .extend(extra_pubkeys);
+ }
+
+ Ok(())
+ }
+
+ /// Extends this lookup table with the portion of the provided [pubkeys] that
+ /// fits into the table respecting [solana_sdk::address_lookup_table::state::LOOKUP_TABLE_MAX_ADDRESSES].
+ ///
+ /// The transaction is signed with the [Self::derived_auth].
+ ///
+ /// - **rpc_client**: RPC client to use for sending the extend transaction
+ /// - **authority**: payer for the extend transaction
+ /// - **pubkeys**: to extend the lookup table with
+ /// - **reqid**: id of the request adding the pubkeys
+ ///
+ /// Returns: the pubkeys that were added to the table
+ pub async fn extend_respecting_capacity(
+ &self,
+ rpc_client: &MagicblockRpcClient,
+ authority: &Keypair,
+ pubkeys: &[Pubkey],
+ reqid: u64,
+ ) -> TableManiaResult<Vec<Pubkey>> {
+ let Some(len) = self.pubkeys().map(|x| x.len()) else {
+ return Err(TableManiaError::CannotExtendDeactivatedTable(
+ *self.table_address(),
+ ));
+ };
+ let remaining_capacity = LOOKUP_TABLE_MAX_ADDRESSES.saturating_sub(len);
+ if remaining_capacity == 0 {
+ return Ok(vec![]);
+ }
+
+ let storing = if pubkeys.len() >= remaining_capacity {
+ let (storing, _) = pubkeys.split_at(remaining_capacity);
+ storing
+ } else {
+ pubkeys
+ };
+
+ let res = self.extend(rpc_client, authority, storing, reqid).await;
+ res.map(|_| storing.to_vec())
+ }
+
+ /// Deactivates this lookup table.
+ ///
+ /// - **rpc_client**: RPC client to use for sending the deactivate transaction
+ /// - **authority**: pays for the deactivate transaction
+ pub async fn deactivate(
+ &mut self,
+ rpc_client: &MagicblockRpcClient,
+ authority: &Keypair,
+ ) -> TableManiaResult<()> {
+ let deactivate_ix = alt::instruction::deactivate_lookup_table(
+ *self.table_address(),
+ self.derived_auth().pubkey(),
+ );
+ let ixs = vec![deactivate_ix];
+ let latest_blockhash = rpc_client.get_latest_blockhash().await?;
+ let tx = Transaction::new_signed_with_payer(
+ &ixs,
+ Some(&authority.pubkey()),
+ &[authority, self.derived_auth()],
+ latest_blockhash,
+ );
+
+ let outcome = rpc_client
+ .send_transaction(&tx, &Self::get_commitment(rpc_client))
+ .await?;
+ let (signature, error) = outcome.into_signature_and_error();
+ if let Some(error) = &error {
+ error!(
+ "Error deactivating lookup table: {:?} ({})",
+ error, signature
greptile
logic: Error is logged but not propagated in deactivate(). This could mask failures and lead to inconsistent state
diff block
+import { useState, useEffect } from "react";
+import { List, Icon } from "@raycast/api";
+import { MediaDetails } from "./types";
+import { removeFromWatchlist, readWatchlist } from "./utils/watchlist";
+import MediaListItem from "./components/MediaListItem";
+import { searchID } from "./utils/requests";
+import SearchBarAccessory from "./components/SearchBarAccessory";
+import { showFailureToast } from "@raycast/utils";
+
+export default function Watchlist() {
+ const [watchlist, setWatchlist] = useState<MediaDetails[]>([]);
+ const [loadedItems, setLoadedItems] = useState<{ [key: string]: MediaDetails }>({});
+ const [viewType, setViewType] = useState("all");
+
+ useEffect(() => {
+ const loadWatchlist = async () => {
+ try {
+ const items = await readWatchlist();
+ setWatchlist(items as MediaDetails[]);
+ } catch (error) {
+ showFailureToast(error, { title: "Could not load watchlist" });
+ }
+ };
+ loadWatchlist();
+ }, []);
+
+ const handleRemove = async (imdbID: string) => {
+ await removeFromWatchlist(imdbID);
+ const updatedList = watchlist.filter((item) => item.imdbID !== imdbID);
+ setWatchlist(updatedList);
+ };
greptile
logic: No error handling for removeFromWatchlist failure. Could leave UI and storage in inconsistent state if removal fails.
suggested fix
const handleRemove = async (imdbID: string) => {
try {
await removeFromWatchlist(imdbID);
const updatedList = watchlist.filter((item) => item.imdbID !== imdbID);
setWatchlist(updatedList);
} catch (error) {
+ showFailureToast(error, { title: "Could not remove from watchlist" });
}
};
diff block
+/* eslint-disable @nx/workspace-no-hardcoded-colors */
+/* eslint-disable @nx/workspace-explicit-boolean-predicates-in-if */
+/* eslint-disable no-console */
+import { useTheme } from '@emotion/react';
+import React, { useState } from 'react';
+import { Session } from 'sip.js';
+import { IconArrowRight, IconCheck } from 'twenty-ui';
+
+interface TransferButtonProps {
+ session: Session | null;
+ type: 'attended' | 'blind';
+ sendDTMF: (tone: string) => void;
+}
+
+const TransferButton: React.FC<TransferButtonProps> = ({ type, sendDTMF }) => {
+ const [isTransferring, setIsTransferring] = useState(false);
+
+ const handleTransfer = () => {
+ const extension = window.prompt('Enter the extension to transfer to:');
+
+ console.log('Extension to transfer:', extension);
+ console.log('Type of transfer:', type);
+
+ if (extension) {
+ if (type === 'attended') {
+ sendDTMF(`${extension}`);
+ } else {
+ sendDTMF(`${extension}`);
+ }
+ setIsTransferring(true);
+ }
+ };
+
+ const handleCompleteTransfer = () => {
+ // const extension = window.prompt('Enter the extension to transfer to:');
+
+ setIsTransferring(false);
+ };
greptile
logic: handleCompleteTransfer lacks implementation for actually completing the transfer. Just setting isTransferring to false may leave the call in an inconsistent state.
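A hedged sketch of what a completion handler could look like given only the props this component receives; the `'*2'` feature code is purely an assumption about the PBX, and a real implementation may instead need a SIP REFER on the `session` object:

```typescript
const handleCompleteTransfer = () => {
  try {
    if (type === 'attended') {
      // Assumed PBX "complete transfer" feature code; replace with the real one,
      // or issue a SIP REFER through the session if the backend expects that.
      sendDTMF('*2');
    }
    setIsTransferring(false);
  } catch (error) {
    console.error('Failed to complete transfer:', error);
    // Leave isTransferring set so the UI still reflects the pending transfer.
  }
};
```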
diff block
+"use client";
+
+import { Row } from "@tanstack/react-table";
+import { useParams, usePathname, useRouter, useSearchParams } from "next/navigation";
+import { useCallback, useEffect, useState } from "react";
+
+import RefreshButton from "@/components/traces/refresh-button";
+import { columns, filters } from "@/components/traces/sessions-table/columns";
+import { useToast } from "@/lib/hooks/use-toast";
+import { SessionPreview, Trace } from "@/lib/traces/types";
+import { PaginatedResponse } from "@/lib/types";
+
+import { DataTable } from "../../ui/datatable";
+import DataTableFilter, { DataTableFilterList } from "../../ui/datatable-filter";
+import DateRangeFilter from "../../ui/date-range-filter";
+import TextSearchFilter from "../../ui/text-search-filter";
+
+type SessionRow = {
+ type: string;
+ data: SessionPreview | Trace;
+ subRows: SessionRow[];
+};
+
+interface SessionsTableProps {
+ onRowClick?: (rowId: string) => void;
+}
+
+export default function SessionsTable({ onRowClick }: SessionsTableProps) {
+ const { projectId } = useParams();
+ const searchParams = useSearchParams();
+ const pathName = usePathname();
+ const router = useRouter();
+ const { toast } = useToast();
+
+ const [focusedRowId, setFocusedRowId] = useState<string | undefined>(undefined);
+ const [sessions, setSessions] = useState<SessionRow[] | undefined>(undefined);
+
+ const defaultPageNumber = searchParams.get("pageNumber") ?? "0";
+ const defaultPageSize = searchParams.get("pageSize") ?? "50";
+ const [totalCount, setTotalCount] = useState<number>(0);
+ const pageNumber = parseInt(searchParams.get("pageNumber") ?? "0");
+ const pageSize = Math.max(parseInt(defaultPageSize), 1);
+ const pageCount = Math.ceil(totalCount / pageSize);
+ const filter = searchParams.get("filter");
+ const startDate = searchParams.get("startDate");
+ const endDate = searchParams.get("endDate");
+ const pastHours = searchParams.get("pastHours");
+ const textSearchFilter = searchParams.get("search");
+
+ const getSessions = useCallback(async () => {
+ try {
+ setSessions(undefined);
+ let queryFilter = searchParams.getAll("filter");
+
+ if (!pastHours && !startDate && !endDate) {
+ const sp = new URLSearchParams();
+ for (const [key, value] of Object.entries(searchParams)) {
+ if (key !== "pastHours") {
+ sp.set(key, value as string);
+ }
+ }
+ sp.set("pastHours", "24");
+ router.push(`${pathName}?${sp.toString()}`);
+ return;
+ }
+
+ const urlParams = new URLSearchParams();
+ urlParams.set("pageNumber", pageNumber.toString());
+ urlParams.set("pageSize", pageSize.toString());
+
+ queryFilter.forEach((filter) => urlParams.append("filter", filter));
+
+ if (pastHours != null) urlParams.set("pastHours", pastHours);
+ if (startDate != null) urlParams.set("startDate", startDate);
+ if (endDate != null) urlParams.set("endDate", endDate);
+
+ if (typeof textSearchFilter === "string" && textSearchFilter.length > 0) {
+ urlParams.set("search", textSearchFilter);
+ }
+
+ const url = `/api/projects/${projectId}/sessions?${urlParams.toString()}`;
+
+ const res = await fetch(url, {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ });
+
+ if (!res.ok) {
+ throw new Error(`Failed to fetch sessions: ${res.status} ${res.statusText}`);
+ }
+
+ const data = (await res.json()) as PaginatedResponse<SessionPreview>;
+
+ setSessions(
+ data.items.map((s) => ({
+ type: "session",
+ data: s,
+ subRows: [],
+ }))
+ );
+
+ setTotalCount(data.totalCount);
+ } catch (error) {
+ toast({
+ title: "Failed to load sessions. Please try again.",
+ variant: "destructive",
+ });
+ // Set empty sessions to show error state
+ setSessions([]);
+ setTotalCount(0);
+ }
+ }, [
+ endDate,
+ pageNumber,
+ pageSize,
+ pastHours,
+ pathName,
+ projectId,
+ router,
+ searchParams,
+ startDate,
+ textSearchFilter,
+ filter,
+ toast,
+ ]);
+
+ const onPageChange = useCallback(
+ (pageNumber: number, pageSize: number) => {
+ const params = new URLSearchParams(searchParams);
+ params.set("pageNumber", pageNumber.toString());
+ params.set("pageSize", pageSize.toString());
+ router.push(`${pathName}?${params.toString()}`);
+ },
+ [pathName, router, searchParams]
+ );
+
+ const handleRowClick = useCallback(
+ async (row: Row<SessionRow>) => {
+ if (row.original.type === "trace") {
+ const params = new URLSearchParams(searchParams);
+ setFocusedRowId(row.original.data.id);
+ onRowClick?.(row.original.data.id);
+ params.set("selectedId", row.original.data.id);
+ router.push(`${pathName}?${params.toString()}`);
+ return;
+ }
+
+ row.toggleExpanded();
+
+ const filter = {
+ column: "session_id",
+ value: row.original.data.id,
+ operator: "eq",
+ };
+
+ const res = await fetch(
+ `/api/projects/${projectId}/traces?pageNumber=0&pageSize=50&filter=${JSON.stringify(filter)}`
+ );
greptile
logic: Missing error handling for trace fetch request. Could leave table in inconsistent state if request fails.
```suggestion
try {
const res = await fetch(
`/api/projects/${projectId}/traces?pageNumber=0&pageSize=50&filter=${JSON.stringify(filter)}`
);
if (!res.ok) {
+ throw new Error(`Failed to fetch traces: ${res.status} ${res.statusText}`);
}
```
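A slightly fuller sketch of the same idea, so a failed request collapses the row and surfaces a toast instead of leaving a silently empty expansion (the sub-row population step is elided because it is not shown in the diff):

```typescript
try {
  const res = await fetch(
    `/api/projects/${projectId}/traces?pageNumber=0&pageSize=50&filter=${JSON.stringify(filter)}`
  );
  if (!res.ok) {
    throw new Error(`Failed to fetch traces: ${res.status} ${res.statusText}`);
  }
  const traces = (await res.json()) as PaginatedResponse<Trace>;
  // ...populate row.original.subRows / setSessions from traces.items as the original handler intends
} catch (error) {
  row.toggleExpanded(); // undo the optimistic expansion
  toast({ title: "Failed to load session traces. Please try again.", variant: "destructive" });
}
```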
diff block
echo 'if [ -f ~/.bashrc ]; then . ~/.bashrc; fi' >> ~/.profile
# Clean and reinstall dependencies to ensure platform compatibility
-echo "📦 Cleaning and reinstalling npm dependencies..."
+echo "📦 Cleaning and reinstalling dependencies..."
if [ -d "node_modules" ]; then
echo "Removing existing node_modules to ensure platform compatibility..."
rm -rf node_modules
rm -rf apps/sim/node_modules
rm -rf apps/docs/node_modules
fi
+# Ensure Bun cache directory exists and has correct permissions
+mkdir -p ~/.bun/cache
+chmod 700 ~/.bun ~/.bun/cache
+
# Install dependencies with platform-specific binaries
-npm install || {
- echo "⚠️ npm install had issues but continuing setup..."
+echo "Installing dependencies with Bun..."
+bun install || {
+ echo "⚠️ bun install had issues but continuing setup..."
}
greptile
logic: Silently continuing after bun install failures could leave the environment in an inconsistent state. Consider failing fast here instead.
suggested fix
bun install || {
+ echo "❌ bun install failed. Please check the errors above and try again."
+ exit 1
+}
diff block
+import type { MessageType, StorageThreadType, WorkflowRuns, TraceType } from '@mastra/core';
+import { MastraStorage, TABLE_MESSAGES, TABLE_THREADS } from '@mastra/core/storage';
+const TABLE_TRACES = 'traces';
+const TABLE_EVALS = 'evals';
+const TABLE_WORKFLOW_RUNS = 'workflow_runs';
+import type { StorageColumn, EvalRow, StorageGetMessagesArg, TABLE_NAMES } from '@mastra/core/storage';
+import type {
+ CheckHealthResponse,
+ ClientConfig,
+ CollectionSchema,
+ DescribeCollectionResponse,
+ FieldType,
+} from '@zilliz/milvus2-sdk-node';
+import { MilvusClient, DataType, IndexType, MetricType } from '@zilliz/milvus2-sdk-node';
+
+export class MilvusStorage extends MastraStorage {
+ private client: MilvusClient;
+ private loadedCollections: Set<string>;
+
+ constructor(
+ name: string,
+ addressOrConfig: ClientConfig | string,
+ ssl?: boolean,
+ username?: string,
+ password?: string,
+ ) {
+ try {
+ super({ name });
+ this.client = new MilvusClient(addressOrConfig, ssl, username, password);
+ this.loadedCollections = new Set<string>();
+ return this;
+ } catch (error) {
+ throw new Error('Failed to initialize Milvus client: ' + error);
+ }
+ }
+
+ checkHealth(): Promise<CheckHealthResponse> {
+ return this.client.checkHealth();
+ }
+
+ async describeTable({ tableName }: { tableName: string }): Promise<DescribeCollectionResponse> {
+ return this.client.describeCollection({ collection_name: tableName });
+ }
+
+ translateSchema(schema: Record<string, StorageColumn>): FieldType[] {
+ return Object.entries(schema).map(([name, column]) => {
+ let dataType: DataType;
+ let maxLength: number | undefined;
+
+ switch (column.type) {
+ case 'uuid':
+ dataType = DataType.VarChar;
+ maxLength = 36; // Standard UUID length
+ break;
+ case 'integer':
+ dataType = DataType.Int32;
+ break;
+ case 'bigint':
+ dataType = DataType.Int64;
+ break;
+ case 'text':
+ dataType = DataType.VarChar;
+ maxLength = 65535; // Default max length for text
+ break;
+ case 'timestamp':
+ dataType = DataType.Int64;
+ break;
+ case 'jsonb':
+ dataType = DataType.JSON;
+ break;
+ default:
+ dataType = DataType.VarChar; // Default to VarChar if type is unknown
+ maxLength = 255;
+ }
+
+ const fieldType: FieldType = {
+ name,
+ data_type: dataType,
+ is_primary_key: column.primaryKey ?? false,
+ nullable: column.nullable ?? true,
+ };
+
+ if (maxLength && dataType === DataType.VarChar) {
+ fieldType.max_length = maxLength;
+ }
+
+ return fieldType;
+ });
+ }
+
+ transformCollectionDescription(schema: CollectionSchema): Record<string, StorageColumn>[] {
+ const types: Record<string, string> = {
+ int64: 'bigint',
+ uuid: 'uuid',
+ int32: 'integer',
+ varchar: 'text',
+ float64: 'float',
+ floatvector: 'vector',
+ json: 'jsonb',
+ };
+
+ return schema.fields.map(field => ({
+ [field.name]: {
+ type: types[field.data_type.toString().toLowerCase()] as StorageColumn['type'],
+ nullable: field.nullable,
+ primaryKey: field.is_primary_key,
+ },
+ }));
+ }
+
+ async getTableSchema(tableName: TABLE_NAMES): Promise<Record<string, StorageColumn>[]> {
+ try {
+ const collection = await this.client.describeCollection({ collection_name: tableName });
+ return this.transformCollectionDescription(collection.schema);
+ } catch (error) {
+ throw new Error('Failed to get collection: ' + error);
+ }
+ }
+
+ /**
+ * Creates a table in Milvus with the given schema. An extra placeholder vector field is added to the schema. Milvus requires at least one vector field to be present in the collection schema.
+ *
+ * check this discussion thread for reference: https://github.com/milvus-io/milvus/discussions/34927
+ *
+ * @param tableName - The table name.
+ * @param schema - The schema of the table.
+ */
+ async createTable({
+ tableName,
+ schema,
+ }: {
+ tableName: TABLE_NAMES;
+ schema: Record<string, StorageColumn>;
+ }): Promise<void> {
+ try {
+ const fields = this.translateSchema(schema);
+
+ // Add a placeholder vector field - required by Milvus
+ fields.push({
+ name: 'vector_placeholder',
+ data_type: DataType.FloatVector,
+ dim: 2, // Smallest possible dimension
+ is_primary_key: false,
+ });
+
+ const response = await this.client.createCollection({
+ collection_name: tableName,
+ schema: fields,
+ });
+
+ if (response.error_code !== 'Success') {
+ throw new Error('Error status code: ' + response.reason);
+ }
+
+ // Creating index on placeholder vector field because milvus requires mandatory index on vector field
+ await this.client.createIndex({
+ collection_name: tableName,
+ field_name: 'vector_placeholder',
+ index_name: 'vector_idx',
+ index_type: IndexType.IVF_FLAT,
+ metric_type: MetricType.L2,
+ });
greptile
logic: Index creation on vector_placeholder lacks error handling - could leave collection in inconsistent state if index creation fails
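One possible shape for that handling, sketched against the same SDK calls used above: treat index creation as part of table creation and drop the just-created collection if it fails, so `createTable` either fully succeeds or leaves nothing behind (whether dropping is the right recovery is a design choice):

```typescript
try {
  const indexResponse = await this.client.createIndex({
    collection_name: tableName,
    field_name: 'vector_placeholder',
    index_name: 'vector_idx',
    index_type: IndexType.IVF_FLAT,
    metric_type: MetricType.L2,
  });
  if (indexResponse.error_code !== 'Success') {
    throw new Error('Error status code: ' + indexResponse.reason);
  }
} catch (indexError) {
  // Roll back so callers never observe a collection without its mandatory vector index.
  await this.client.dropCollection({ collection_name: tableName });
  throw new Error('Failed to create index on placeholder vector field: ' + indexError);
}
```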
diff block
+'use client'
+
+import { useRef, useState } from 'react'
+import { ChatMessage } from '../components/message/message'
+
+export function useChatStreaming() {
+ const [isStreamingResponse, setIsStreamingResponse] = useState(false)
+ const abortControllerRef = useRef<AbortController | null>(null)
+
+ const stopStreaming = (setMessages: React.Dispatch<React.SetStateAction<ChatMessage[]>>) => {
+ if (abortControllerRef.current) {
+ // Abort the fetch request
+ abortControllerRef.current.abort()
+ abortControllerRef.current = null
+
+ // Add a message indicating the response was stopped
+ setMessages((prev) => {
+ const lastMessage = prev[prev.length - 1]
+
+ // Only modify if the last message is from the assistant (as expected)
+ if (lastMessage && lastMessage.type === 'assistant') {
+ // Append a note that the response was stopped
+ const updatedContent =
+ lastMessage.content +
+ (lastMessage.content
+ ? '\n\n_Response stopped by user._'
+ : '_Response stopped by user._')
+
+ return [
+ ...prev.slice(0, -1),
+ { ...lastMessage, content: updatedContent, isStreaming: false },
+ ]
+ }
+
+ return prev
+ })
+
+ // Reset streaming state
+ setIsStreamingResponse(false)
+ }
+ }
+
+ const handleStreamedResponse = async (
+ response: Response,
+ setMessages: React.Dispatch<React.SetStateAction<ChatMessage[]>>,
+ setIsLoading: React.Dispatch<React.SetStateAction<boolean>>,
+ scrollToBottom: () => void,
+ userHasScrolled?: boolean
+ ) => {
+ const messageId = crypto.randomUUID()
+
+ // Set streaming state before adding the assistant message
+ setIsStreamingResponse(true)
+
+ // Add placeholder message
+ setMessages((prev) => [
+ ...prev,
+ {
+ id: messageId,
+ content: '',
+ type: 'assistant',
+ timestamp: new Date(),
+ isStreaming: true,
+ },
+ ])
+
+ // Stop showing loading indicator once streaming begins
+ setIsLoading(false)
+
+ // Ensure the response body exists and is a ReadableStream
+ const reader = response.body?.getReader()
+ if (reader) {
+ const decoder = new TextDecoder()
+ let done = false
+
+ try {
+ while (!done) {
+ // Check if aborted before each read
+ if (abortControllerRef.current === null) {
+ console.log('Stream reading aborted')
+ break
+ }
greptile
logic: The abort check happens after awaiting reader.read(), which could leave the stream in an inconsistent state. Consider moving abort check before the await.
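A sketch of the reordering the comment asks for: check the abort flag before each `await`, and cancel the reader when aborted so the underlying connection is released (the decode-and-append details are illustrative, not the file's exact code):

```typescript
while (!done) {
  if (abortControllerRef.current === null) {
    await reader.cancel(); // release the lock and the underlying stream
    break;
  }
  const { value, done: readerDone } = await reader.read();
  done = readerDone;
  if (value) {
    const chunk = decoder.decode(value, { stream: true });
    setMessages((prev) =>
      prev.map((m) => (m.id === messageId ? { ...m, content: m.content + chunk } : m))
    );
    if (!userHasScrolled) {
      scrollToBottom();
    }
  }
}
```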
diff block
+import { actions, afterMount, connect, kea, listeners, path, reducers } from 'kea'
+import api from 'lib/api'
+import { FEATURE_FLAGS } from 'lib/constants'
+import { featureFlagLogic } from 'lib/logic/featureFlagLogic'
+import { PersonPropType } from 'scenes/persons/person-utils'
+
+import type { chatListLogicType } from './chatListLogicType'
+
+export type Chat = {
+ id?: string
+ person_uuid?: string // email or user id, for dummy data
+ distinct_id?: string // email or user id, for dummy data
+ person?: PersonPropType
+ team: string // team id, for dummy data
+ title?: string | null
+ created_at: string
+ updated_at: string
+ source_url?: string | null
+ unread_count: number
+ messages: ChatMessage[]
+}
+
+export type ChatMessage = {
+ id?: string
+ content: string
+ created_at: string
+ read: boolean
+ is_assistant: boolean
+}
+
+export const chatListLogic = kea<chatListLogicType>([
+ path(['products', 'chat', 'frontend', 'chatListLogic']),
+ connect(() => ({
+ values: [featureFlagLogic, ['featureFlags']],
+ })),
+ actions({
+ setSelectedChatId: (selectedChatId: string | null) => ({ selectedChatId }),
+ setChats: (chats: Chat[]) => ({ chats }),
+ sendMessage: (message: string) => ({ message }),
+ setMessage: (message: string) => ({ message }),
+ loadChats: true,
+ loadChat: (chatId: string) => ({ chatId }),
+ createZendDeskTicket: (subject: string, uuid: string, message: string) => ({ subject, uuid, message }),
+ }),
+ reducers({
+ chats: [
+ [] as Chat[],
+ {
+ setChats: (_, { chats }) => chats,
+ },
+ ],
+ selectedChatId: [
+ null as string | null,
+ {
+ setSelectedChatId: (_, { selectedChatId }) => selectedChatId,
+ },
+ ],
+ message: [
+ '',
+ {
+ setMessage: (_, { message }) => message,
+ },
+ ],
+ }),
+ listeners(({ values, actions }) => ({
+ sendMessage: async ({ message }) => {
+ if (values.selectedChatId) {
+ const chat = values.chats.find((chat) => chat.id === values.selectedChatId)
+ if (chat) {
+ const newMessage: ChatMessage = {
+ content: message,
+ created_at: new Date().toISOString(),
+ read: true,
+ is_assistant: true,
+ }
greptile
logic: Message is marked as read and is_assistant=true before server confirmation. Could lead to inconsistent state if API call fails.
suggested fix
const newMessage: ChatMessage = {
content: message,
created_at: new Date().toISOString(),
read: true,
+ is_assistant: false,
}
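The other half of the comment (server confirmation) could be handled with an optimistic append plus rollback; a hedged sketch using the values and actions already available in this listener, where the `api.create` endpoint path is hypothetical:

```typescript
const previousChats = values.chats
const optimisticChats = values.chats.map((c) =>
    c.id === values.selectedChatId ? { ...c, messages: [...c.messages, newMessage] } : c
)
actions.setChats(optimisticChats)
try {
    // Hypothetical endpoint; the real API route is not shown in the diff.
    await api.create(`api/chats/${values.selectedChatId}/messages`, { content: message })
} catch (error) {
    actions.setChats(previousChats) // roll back the optimistic update on failure
    console.error('Failed to send chat message', error)
}
```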
diff block
+from collections import defaultdict
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Set
+
+import numpy as np
+from sklearn.cluster import SpectralClustering # type: ignore
+from thefuzz import fuzz # type: ignore
+
+from onyx.db.document import update_document_kg_info
+from onyx.db.engine import get_session_with_current_tenant
+from onyx.db.entities import add_entity
+from onyx.db.entities import delete_entities_by_id_names
+from onyx.db.entities import get_entities_by_grounding
+from onyx.db.entity_type import get_determined_grounded_entity_types
+from onyx.db.relationships import add_relationship
+from onyx.db.relationships import add_relationship_type
+from onyx.db.relationships import delete_relationship_types_by_id_names
+from onyx.db.relationships import delete_relationships_by_id_names
+from onyx.db.relationships import get_all_relationship_types
+from onyx.db.relationships import get_all_relationships
+from onyx.kg.models import KGGroundingType
+from onyx.kg.models import KGStage
+from onyx.kg.utils.embeddings import encode_string_batch
+from onyx.llm.factory import get_default_llms
+from onyx.utils.logger import setup_logger
+
+logger = setup_logger()
+
+
+def _create_ge_determined_entity_map() -> Dict[str, List[str]]:
+ """Create a mapping of entity type ID names to their grounding determination instructions.
+
+ Returns:
+ Dictionary mapping entity type ID names to their list of grounding determination instructions
+ """
+ ge_determined_entity_map: Dict[str, List[str]] = defaultdict(list)
+
+ with get_session_with_current_tenant() as db_session:
+ determined_entities = get_determined_grounded_entity_types(db_session)
+
+ for entity_type in determined_entities:
+ if entity_type.entity_values: # Extra safety check
+ ge_determined_entity_map[entity_type.id_name] = (
+ entity_type.entity_values
+ )
+
+ return ge_determined_entity_map
+
+
+def _cluster_relationships(
+ relationship_data: List[dict], n_clusters: int = 3, batch_size: int = 12
+) -> Dict[int, List[str]]:
+ """
+ Cluster relationships using their embeddings.
+
+ Args:
+ relationship_data: List of dicts with 'name' and 'cluster_count'
+ n_clusters: Number of clusters to create
+ batch_size: Size of batches for embedding requests
+
+ Returns:
+ Dictionary mapping cluster IDs to lists of relationship names
+ """
+
+ # TODO: This is TEMP for the pre-defined relationships.
+ # if len(relationship_data) < n_clusters:
+ if len(relationship_data) < n_clusters:
+ logger.warning(
+ "Not enough relationships to cluster. Returning each relationship as its own cluster."
+ )
+ return {i: [rel["name"]] for (i, rel) in enumerate(relationship_data)}
+
+ train_data = []
+ rel_names = []
+
+ # Process relationships in batches
+ for i in range(0, len(relationship_data), batch_size):
+ batch = relationship_data[i : i + batch_size]
+ batch_names = [
+ rel["name"].replace("_", " ") for rel in batch
+ ] # better for LLM to have spaces between words
+
+ # Get embeddings for the entire batch at once
+ batch_embeddings = encode_string_batch(batch_names)
+
+ # Add embeddings and corresponding data
+ for rel, embedding in zip(batch, batch_embeddings):
+ count = int(rel["cluster_count"]) or 1
+ # Add the relationship name 'count' times
+ for _ in range(count):
+ train_data.append(embedding)
+ rel_names.append(rel["name"])
+
+ # Convert to numpy arrays
+ X = np.array(train_data)
+
+ # Perform clustering
+ # clustering = KMeans(n_clusters=n_clusters, random_state=42)
+ clustering = SpectralClustering(n_clusters=n_clusters, random_state=42)
+ clusters = clustering.fit_predict(X)
+
+ # Group relationship names by cluster
+ cluster_groups: Dict[int, List[str]] = defaultdict(list)
+ for rel_name, cluster_id in zip(rel_names, clusters):
+ if rel_name not in cluster_groups[cluster_id]:
+ cluster_groups[cluster_id].append(rel_name)
+
+ return dict(cluster_groups)
+
+
+def _cluster_entities(
+ entity_data: List[dict], n_clusters: int = 3, batch_size: int = 12
+) -> Dict[int, List[str]]:
+ """
+ Cluster entities using their embeddings.
+
+ Args:
+ entity_data: List of dicts with 'name' and 'cluster_count'
+ n_clusters: Number of clusters to create
+ batch_size: Size of batches for embedding requests
+
+ Returns:
+ Dictionary mapping cluster IDs to lists of entity names
+ """
+
+ if len(entity_data) < n_clusters:
+ logger.warning(
+ "Not enough entities to cluster. Returning each entity as its own cluster."
+ )
+        return {i: [ent["name"]] for (i, ent) in enumerate(entity_data)}
+
+ train_data = []
+ entity_names = []
+
+ # Process entities in batches
+ for i in range(0, len(entity_data), batch_size):
+ batch = entity_data[i : i + batch_size]
+ batch_names = [
+ ent["name"].replace("_", " ") for ent in batch
+ ] # use spaces between words for LLM
+
+ # Get embeddings for the entire batch at once
+ batch_embeddings = encode_string_batch(batch_names)
+
+ # Add embeddings and corresponding data
+ for ent, embedding in zip(batch, batch_embeddings):
+ count = int(ent["cluster_count"]) or 1
+
+ # Add the entity name 'count' times
+ for _ in range(count):
+ entity_names.append(ent["name"])
+ train_data.append(embedding)
+
+ # Convert to numpy arrays
+ X = np.array(train_data)
+
+ # Perform clustering
+ # clustering = KMeans(n_clusters=n_clusters, random_state=42)
+ clustering = SpectralClustering(n_clusters=n_clusters, random_state=42)
+ clusters = clustering.fit_predict(X)
+
+ # Group entity names by cluster
+ cluster_groups: Dict[int, List[str]] = defaultdict(list)
+ for ent_name, cluster_id in zip(entity_names, clusters):
+ if ent_name not in cluster_groups[cluster_id]:
+ cluster_groups[cluster_id].append(ent_name)
+
+ return dict(cluster_groups)
+
+
+def _create_relationship_type_mapping(
+ full_clustering_results: Dict[str, Dict[str, Dict[int, Dict[str, Any]]]],
+ relationship_mapping: Dict[str, Dict[str, List[dict]]],
+) -> tuple[Dict[str, str], Dict[str, int]]:
+ """
+ Create a mapping between original relationship types and their clustered versions.
+
+ Args:
+ full_clustering_results: Clustering results with cluster names
+ relationship_mapping: Original relationship types organized by source/target
+
+ Returns:
+ Dictionary mapping original relationship type ID to clustered relationship type ID
+ """
+ relationship_type_replacements: Dict[str, str] = {}
+ reverse_relationship_type_replacements_count: Dict[str, int] = defaultdict(int)
+
+ for source_type, target_dict in relationship_mapping.items():
+ for target_type, rel_types in target_dict.items():
+ # Get clusters for this source/target pair
+ clusters = full_clustering_results.get(source_type, {}).get(target_type, {})
+
+ for cluster_id, cluster_info in clusters.items():
+ cluster_name = cluster_info["cluster_name"]
+ for rel_name in cluster_info["relationships"]:
+ original_id = f"{source_type}__{rel_name.lower()}__{target_type}"
+ clustered_id = (
+ f"{source_type}__{cluster_name.lower()}__{target_type}"
+ )
+ relationship_type_replacements[original_id] = clustered_id
+ reverse_relationship_type_replacements_count[clustered_id] += len(
+ cluster_info["relationships"]
+ )
+
+ return relationship_type_replacements, reverse_relationship_type_replacements_count
+
+
+def _create_entity_mapping(
+ full_entity_clustering_results: Dict[str, Dict[int, Dict[str, Any]]],
+ entity_mapping: Dict[str, List[dict]],
+) -> tuple[Dict[str, str], Dict[str, int]]:
+ """
+ Create a mapping between original entities and their clustered versions.
+
+ Args:
+ full_entity_clustering_results: Clustering results with cluster names
+ entity_mapping: Original entities organized by entity type
+
+ Returns:
+ Dictionary mapping original entity ID to clustered entity ID
+ """
+ entity_replacements: Dict[str, str] = {}
+ reverse_entity_replacements_count: Dict[str, int] = defaultdict(int)
+
+ for entity_type, clusters in full_entity_clustering_results.items():
+ for cluster_id, cluster_info in clusters.items():
+ cluster_name = cluster_info["cluster_name"]
+ for entity_name in cluster_info["entities"]:
+ # Skip wildcard entities
+ if entity_name == "*":
+ continue
+
+ original_id = f"{entity_type}:{entity_name}"
+ clustered_id = f"{entity_type}:{cluster_name.title()}"
+ entity_replacements[original_id] = clustered_id
+ reverse_entity_replacements_count[clustered_id] += len(
+ cluster_info["entities"]
+ )
+ return entity_replacements, reverse_entity_replacements_count
+
+
+def _create_relationship_mapping(
+ relationship_type_replacements: Dict[str, str],
+ reverse_relationship_type_replacements_count: Dict[str, int],
+ entity_replacements: Dict[str, str],
+ reverse_entity_replacements_count: Dict[str, int],
+ relationships: List[
+ Any
+ ], # This would be List[KGRelationship] but avoiding the import
+) -> tuple[Dict[str, str], Dict[str, int]]:
+ """
+ Create a mapping between original relationships and their clustered versions,
+ taking into account both clustered relationship types and clustered entities.
+
+ Args:
+ relationship_type_replacements: Mapping of original to clustered relationship type IDs
+ entity_replacements: Mapping of original to clustered entity IDs
+ relationships: List of relationships from the database
+
+ Returns:
+ Dictionary mapping original relationship ID to clustered relationship ID
+ """
+ relationship_replacements: Dict[str, str] = {}
+ reverse_relationship_replacements_count: Dict[str, int] = defaultdict(int)
+
+ for rel in relationships:
+ # Skip if source or target is a wildcard
+
+ # Get the clustered entities (if they exist)
+ source_node = entity_replacements.get(rel.source_node, rel.source_node)
+ target_node = entity_replacements.get(rel.target_node, rel.target_node)
+
+ # Create the relationship type ID
+ source_type = rel.source_node.split(":")[0]
+ target_type = rel.target_node.split(":")[0]
+ rel_type_id = f"{source_type}__{rel.type.lower()}__{target_type}"
+
+ # Get the clustered relationship type (if it exists)
+ clustered_rel_type_id = relationship_type_replacements.get(
+ rel_type_id, rel_type_id
+ )
+
+ # Extract the relationship name from the clustered type ID
+ _, rel_name, _ = clustered_rel_type_id.split("__")
+
+ # Create the original and clustered relationship IDs
+ original_id = f"{rel.source_node}__{rel.type.lower()}__{rel.target_node}"
+ clustered_id = f"{source_node}__{rel_name}__{target_node}"
+
+ relationship_replacements[original_id] = clustered_id
+ reverse_relationship_replacements_count[clustered_id] += rel.occurrences or 1
+
+ return relationship_replacements, reverse_relationship_replacements_count
+
+
+def _match_ungrounded_ge_entities(
+ ungrounded_ge_entities: Dict[str, List[str]],
+ grounded_ge_entities: Dict[str, List[str]],
+ fuzzy_match_threshold: int = 80,
+) -> Dict[str, Dict[str, str]]:
+ """
+ Create a mapping for ungrounded entities by matching them to grounded entities
+ or previously processed ungrounded entities. First checks for containment relationships,
+ then falls back to fuzzy matching if no containment is found.
+
+ Args:
+ ungrounded_ge_entities: Dictionary mapping entity types to lists of ungrounded entity names
+ grounded_ge_entities: Dictionary mapping entity types to lists of grounded entity names
+ fuzzy_match_threshold: Threshold for fuzzy matching (0-100)
+
+ Returns:
+ Dictionary mapping entity types to dictionaries of {original_entity: matched_entity}
+ """
+ entity_match_mapping: Dict[str, Dict[str, str]] = defaultdict(dict)
+ processed_entities: Dict[str, Set[str]] = defaultdict(set)
+
+ # For each entity type
+ for entity_type, ungrounded_entities_list in ungrounded_ge_entities.items():
+ grounded_list = grounded_ge_entities.get(entity_type, [])
+
+ # Process each ungrounded entity
+ for ungrounded_entity in ungrounded_entities_list:
+ if ungrounded_entity == "*":
+ continue
+ best_match = None
+
+ # First check if ungrounded entity is contained in or contains any grounded entities
+ for grounded_entity in grounded_list:
+ if (
+ ungrounded_entity.lower() in grounded_entity.lower()
+ or grounded_entity.lower() in ungrounded_entity.lower()
+ ):
+ best_match = grounded_entity
+ break
+
+ # If no containment match with grounded entities, check previously processed ungrounded entities
+ if not best_match:
+ for processed_entity in processed_entities[entity_type]:
+ if (
+ ungrounded_entity.lower() in processed_entity.lower()
+ or processed_entity.lower() in ungrounded_entity.lower()
+ ):
+ best_match = processed_entity
+ break
+
+ # If still no match, fall back to fuzzy matching
+ if not best_match:
+ best_score = 0
+
+ # Try fuzzy matching with grounded entities
+ for grounded_entity in grounded_list:
+ score = fuzz.ratio(
+ ungrounded_entity.lower(), grounded_entity.lower()
+ )
+ if score > fuzzy_match_threshold and score > best_score:
+ best_match = grounded_entity
+ best_score = score
+
+ # Try fuzzy matching with previously processed ungrounded entities
+ if not best_match:
+ for processed_entity in processed_entities[entity_type]:
+ score = fuzz.ratio(
+ ungrounded_entity.lower(), processed_entity.lower()
+ )
+ if score > fuzzy_match_threshold and score > best_score:
+ best_match = processed_entity
+ best_score = score
+
+ # Record the mapping
+ if best_match:
+ entity_match_mapping[entity_type][ungrounded_entity] = best_match
+ else:
+ # No match found, this becomes a new unique entity
+ entity_match_mapping[entity_type][ungrounded_entity] = ungrounded_entity
+ processed_entities[entity_type].add(ungrounded_entity)
+
+ # Log the results
+ logger.info("Entity matching results:")
+ for entity_type, mappings in entity_match_mapping.items():
+ logger.info(f"\nEntity type: {entity_type}")
+ for original, matched in mappings.items():
+ if original != matched:
+ logger.info(f" Mapped: {original} -> {matched}")
+ else:
+ logger.info(f" New unique entity: {original}")
+
+ return entity_match_mapping
+
+
+def _match_determined_ge_entities(
+ determined_ge_entity_map: Dict[str, List[str]],
+ determined_ge_entities_by_type: Dict[str, List[str]],
+ fuzzy_match_threshold: int = 80,
+) -> Dict[str, Dict[str, str]]:
+ """
+    Create a mapping for extracted entities of determined entity types by matching them
+    to their pre-determined values. First checks for containment relationships, then falls
+    back to fuzzy matching; entities with no match are mapped to "Other".
+
+ Args:
+        determined_ge_entity_map: Dictionary mapping entity types to lists of their pre-determined entity values
+        determined_ge_entities_by_type: Dictionary mapping entity types to lists of extracted entity names to match
+ fuzzy_match_threshold: Threshold for fuzzy matching (0-100)
+
+ Returns:
+ Dictionary mapping entity types to dictionaries of {original_entity: matched_entity}
+ """
+ determined_entity_match_mapping: Dict[str, Dict[str, str]] = defaultdict(dict)
+
+ # For each entity type
+ for entity_type, determined_entities_list in determined_ge_entity_map.items():
+ ungrounded_list = determined_ge_entities_by_type.get(entity_type, [])
+
+ # Process each ungrounded entity
+ for ungrounded_entity in ungrounded_list:
+ if ungrounded_entity == "*":
+ continue
+ best_match = None
+
+ # First check if ungrounded entity is contained in or contains any grounded entities
+ for grounded_entity in determined_entities_list:
+ if (
+ ungrounded_entity.lower() in grounded_entity.lower()
+ or grounded_entity.lower() in ungrounded_entity.lower()
+ ):
+ best_match = grounded_entity
+ break
+
+ # If still no match, fall back to fuzzy matching
+ if not best_match:
+ best_score = 0
+
+ # Try fuzzy matching with grounded entities
+ for grounded_entity in determined_entities_list:
+ score = fuzz.ratio(
+ ungrounded_entity.lower(), grounded_entity.lower()
+ )
+ if score > fuzzy_match_threshold and score > best_score:
+ best_match = grounded_entity
+ best_score = score
+
+ # Record the mapping
+ if best_match:
+ determined_entity_match_mapping[entity_type][
+ f"{ungrounded_entity}"
+ ] = f"{best_match}"
+ else:
+ # No match found, this becomes a new unique entity
+ determined_entity_match_mapping[entity_type][
+ f"{ungrounded_entity}"
+ ] = "Other"
+
+ # Log the results
+ logger.info("Entity matching results:")
+ for entity_type, mappings in determined_entity_match_mapping.items():
+ logger.info(f"\nEntity type: {entity_type}")
+ for original, matched in mappings.items():
+ if original != matched:
+ logger.info(f" Mapped: {original} -> {matched}")
+ else:
+ logger.info(f" New unique entity: {original}")
+
+ return determined_entity_match_mapping
+
+
+def kg_clustering(
+ tenant_id: str, index_name: str, processing_chunk_batch_size: int = 8
+) -> None:
+ """
+ Here we will cluster the extractions based on their cluster frameworks.
+ Initially, this will only focus on grounded entities with pre-determined
+ relationships, so 'clustering' is actually not yet required.
+ However, we may need to reconcile entities coming from different sources.
+
+ The primary purpose of this function is to populate the actual KG tables
+ from the temp_extraction tables.
+
+ This will change with deep extraction, where grounded-sourceless entities
+ can be extracted and then need to be clustered.
+ """
+
+ logger.info(f"Starting kg clustering for tenant {tenant_id}")
+
+ ## Retrieval
+
+ source_documents_w_successful_transfers: Set[str] = set()
+ source_documents_w_failed_transfers: Set[str] = set()
+
+ primary_llm, fast_llm = get_default_llms()
+
+ with get_session_with_current_tenant() as db_session:
+
+ relationship_types = get_all_relationship_types(
+ db_session, kg_stage=KGStage.EXTRACTED
+ )
+
+ relationships = get_all_relationships(db_session, kg_stage=KGStage.EXTRACTED)
+
+ grounded_entities = get_entities_by_grounding(
+ db_session, KGStage.EXTRACTED, KGGroundingType.GROUNDED
+ )
+
+ ## Clustering
+
+ # TODO: re-implement clustering of ungrounded entities as well as
+ # grounded entities that do not have a source document with deep extraction
+ # enabled!
+ # For now we would just create a trivial entity mapping from the
+ # 'unclustered' entities to the 'clustered' entities. So we can simply
+ # transfer the entity information from the Staging to the Normalized
+ # tables.
+ # This will be reimplemented when deep extraction is enabled.
+
+ ## Database operations
+
+ # create the clustered objects - entities
+
+ transferred_entities: list[str] = []
+ for grounded_entity in grounded_entities:
+ with get_session_with_current_tenant() as db_session:
+ added_entity = add_entity(
+ db_session,
+ KGStage.NORMALIZED,
+ entity_type=grounded_entity.entity_type_id_name,
+ name=grounded_entity.name,
+ occurrences=grounded_entity.occurrences or 1,
+ document_id=grounded_entity.document_id or None,
+ attributes=grounded_entity.attributes or None,
+ )
+
+ db_session.commit()
+
+ if added_entity:
+ transferred_entities.append(added_entity.id_name)
+
+ transferred_relationship_types: list[str] = []
+ for relationship_type in relationship_types:
+ with get_session_with_current_tenant() as db_session:
+ added_relationship_type_id_name = add_relationship_type(
+ db_session,
+ KGStage.NORMALIZED,
+ source_entity_type=relationship_type.source_entity_type_id_name,
+ relationship_type=relationship_type.type,
+ target_entity_type=relationship_type.target_entity_type_id_name,
+ extraction_count=relationship_type.occurrences or 1,
+ )
+
+ db_session.commit()
+
+ transferred_relationship_types.append(added_relationship_type_id_name)
+
+ transferred_relationships: list[str] = []
+ for relationship in relationships:
+ with get_session_with_current_tenant() as db_session:
+ try:
+ added_relationship = add_relationship(
+ db_session,
+ KGStage.NORMALIZED,
+ relationship_id_name=relationship.id_name,
+ source_document_id=relationship.source_document or "",
+ occurrences=relationship.occurrences or 1,
+ )
+
+ if relationship.source_document:
+ source_documents_w_successful_transfers.add(
+ relationship.source_document
+ )
+
+ db_session.commit()
+
+ transferred_relationships.append(added_relationship.id_name)
+
+ except Exception as e:
+ if relationship.source_document:
+ source_documents_w_failed_transfers.add(
+ relationship.source_document
+ )
+ logger.error(
+ f"Error transferring relationship {relationship.id_name}: {e}"
+ )
+
+ # TODO: remove the /relationship types & entities that correspond to relationships
+ # source documents that failed to transfer. I.e, do a proper rollback
greptile
logic: Missing rollback implementation for failed transfers could leave database in inconsistent state
diff block
+import { ChangeResourceRecordSetsCommand, Route53Client } from "@aws-sdk/client-route-53";
+import * as x509 from "@peculiar/x509";
+import acme from "acme-client";
+import { KeyObject } from "crypto";
+
+import { TableName } from "@app/db/schemas";
+import { BadRequestError, NotFoundError } from "@app/lib/errors";
+import { OrgServiceActor } from "@app/lib/types";
+import { TAppConnectionDALFactory } from "@app/services/app-connection/app-connection-dal";
+import { AppConnection, AWSRegion } from "@app/services/app-connection/app-connection-enums";
+import { decryptAppConnection } from "@app/services/app-connection/app-connection-fns";
+import { TAppConnectionServiceFactory } from "@app/services/app-connection/app-connection-service";
+import { getAwsConnectionConfig } from "@app/services/app-connection/aws/aws-connection-fns";
+import { TAwsConnection, TAwsConnectionConfig } from "@app/services/app-connection/aws/aws-connection-types";
+import { TCertificateBodyDALFactory } from "@app/services/certificate/certificate-body-dal";
+import { TCertificateDALFactory } from "@app/services/certificate/certificate-dal";
+import { TCertificateSecretDALFactory } from "@app/services/certificate/certificate-secret-dal";
+import {
+ CertExtendedKeyUsage,
+ CertKeyAlgorithm,
+ CertKeyUsage,
+ CertStatus
+} from "@app/services/certificate/certificate-types";
+import { TKmsServiceFactory } from "@app/services/kms/kms-service";
+import { TPkiSubscriberDALFactory } from "@app/services/pki-subscriber/pki-subscriber-dal";
+import { TProjectDALFactory } from "@app/services/project/project-dal";
+import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
+
+import { TCertificateAuthorityDALFactory } from "../certificate-authority-dal";
+import { CaStatus, CaType } from "../certificate-authority-enums";
+import { keyAlgorithmToAlgCfg } from "../certificate-authority-fns";
+import { TExternalCertificateAuthorityDALFactory } from "../external-certificate-authority-dal";
+import { AcmeDnsProvider } from "./acme-certificate-authority-enums";
+import { AcmeCertificateAuthorityCredentialsSchema } from "./acme-certificate-authority-schemas";
+import {
+ TAcmeCertificateAuthority,
+ TCreateAcmeCertificateAuthorityDTO,
+ TUpdateAcmeCertificateAuthorityDTO
+} from "./acme-certificate-authority-types";
+
+type TAcmeCertificateAuthorityFnsDeps = {
+ appConnectionDAL: Pick<TAppConnectionDALFactory, "findById">;
+ appConnectionService: Pick<TAppConnectionServiceFactory, "connectAppConnectionById">;
+ certificateAuthorityDAL: Pick<
+ TCertificateAuthorityDALFactory,
+ "create" | "transaction" | "findByIdWithAssociatedCa" | "updateById" | "findWithAssociatedCa"
+ >;
+ externalCertificateAuthorityDAL: Pick<TExternalCertificateAuthorityDALFactory, "create" | "update">;
+ certificateDAL: Pick<TCertificateDALFactory, "create" | "transaction">;
+ certificateBodyDAL: Pick<TCertificateBodyDALFactory, "create">;
+ certificateSecretDAL: Pick<TCertificateSecretDALFactory, "create">;
+ kmsService: Pick<
+ TKmsServiceFactory,
+ "encryptWithKmsKey" | "generateKmsKey" | "createCipherPairWithDataKey" | "decryptWithKmsKey"
+ >;
+ pkiSubscriberDAL: Pick<TPkiSubscriberDALFactory, "findById">;
+ projectDAL: Pick<TProjectDALFactory, "findById" | "findOne" | "updateById" | "transaction">;
+};
+
+type DBConfigurationColumn = {
+ dnsProvider: string;
+ directoryUrl: string;
+ accountEmail: string;
+ hostedZoneId: string;
+};
+
+export const castDbEntryToAcmeCertificateAuthority = (
+ ca: Awaited<ReturnType<TCertificateAuthorityDALFactory["findByIdWithAssociatedCa"]>>
+): TAcmeCertificateAuthority & { credentials: unknown } => {
+ if (!ca.externalCa?.id) {
+ throw new BadRequestError({ message: "Malformed ACME certificate authority" });
+ }
+
+ const dbConfigurationCol = ca.externalCa.configuration as DBConfigurationColumn;
+
+ return {
+ id: ca.id,
+ type: CaType.ACME,
+ disableDirectIssuance: ca.disableDirectIssuance,
+ name: ca.externalCa.name,
+ projectId: ca.projectId,
+ credentials: ca.externalCa.credentials,
+ configuration: {
+ dnsAppConnectionId: ca.externalCa.dnsAppConnectionId as string,
+ dnsProviderConfig: {
+ provider: dbConfigurationCol.dnsProvider as AcmeDnsProvider,
+ hostedZoneId: dbConfigurationCol.hostedZoneId
+ },
+ directoryUrl: dbConfigurationCol.directoryUrl,
+ accountEmail: dbConfigurationCol.accountEmail
+ },
+ status: ca.externalCa.status as CaStatus
+ };
+};
+
+export const route53InsertTxtRecord = async (
+ connection: TAwsConnectionConfig,
+ hostedZoneId: string,
+ domain: string,
+ value: string
+) => {
+ const config = await getAwsConnectionConfig(connection, AWSRegion.US_WEST_1); // REGION is irrelevant because Route53 is global
+ const route53Client = new Route53Client({
+ credentials: config.credentials!,
+ region: config.region
+ });
+
+ const command = new ChangeResourceRecordSetsCommand({
+ HostedZoneId: hostedZoneId,
+ ChangeBatch: {
+ Comment: "Set ACME challenge TXT record",
+ Changes: [
+ {
+ Action: "UPSERT",
+ ResourceRecordSet: {
+ Name: domain,
+ Type: "TXT",
+ TTL: 30,
+ ResourceRecords: [{ Value: value }]
+ }
+ }
+ ]
+ }
+ });
+
+ await route53Client.send(command);
+};
+
+export const route53DeleteTxtRecord = async (
+ connection: TAwsConnectionConfig,
+ hostedZoneId: string,
+ domain: string,
+ value: string
+) => {
+ const config = await getAwsConnectionConfig(connection, AWSRegion.US_WEST_1); // REGION is irrelevant because Route53 is global
+ const route53Client = new Route53Client({
+ credentials: config.credentials!,
+ region: config.region
+ });
+
+ const command = new ChangeResourceRecordSetsCommand({
+ HostedZoneId: hostedZoneId,
+ ChangeBatch: {
+ Comment: "Delete ACME challenge TXT record",
+ Changes: [
+ {
+ Action: "DELETE",
+ ResourceRecordSet: {
+ Name: domain,
+ Type: "TXT",
+ TTL: 30,
+ ResourceRecords: [{ Value: value }]
+ }
+ }
+ ]
+ }
+ });
+
+ await route53Client.send(command);
+};
+
+export const AcmeCertificateAuthorityFns = ({
+ appConnectionDAL,
+ appConnectionService,
+ certificateAuthorityDAL,
+ externalCertificateAuthorityDAL,
+ certificateDAL,
+ certificateBodyDAL,
+ certificateSecretDAL,
+ kmsService,
+ projectDAL,
+ pkiSubscriberDAL
+}: TAcmeCertificateAuthorityFnsDeps) => {
+ const createCertificateAuthority = async ({
+ name,
+ projectId,
+ configuration,
+ disableDirectIssuance,
+ actor,
+ status
+ }: {
+ status: CaStatus;
+ name: string;
+ projectId: string;
+ configuration: TCreateAcmeCertificateAuthorityDTO["configuration"];
+ disableDirectIssuance: boolean;
+ actor: OrgServiceActor;
+ }) => {
+ const { dnsAppConnectionId, directoryUrl, accountEmail, dnsProviderConfig } = configuration;
+ const appConnection = await appConnectionDAL.findById(dnsAppConnectionId);
+
+ if (!appConnection) {
+ throw new NotFoundError({ message: `App connection with ID '${dnsAppConnectionId}' not found` });
+ }
+
+ if (dnsProviderConfig.provider === AcmeDnsProvider.Route53 && appConnection.app !== AppConnection.AWS) {
+ throw new BadRequestError({
+ message: `App connection with ID '${dnsAppConnectionId}' is not an AWS connection`
+ });
+ }
+
+ // validates permission to connect
+ await appConnectionService.connectAppConnectionById(appConnection.app as AppConnection, dnsAppConnectionId, actor);
+
+ const caEntity = await certificateAuthorityDAL.transaction(async (tx) => {
+ try {
+ const ca = await certificateAuthorityDAL.create(
+ {
+ projectId,
+ disableDirectIssuance
+ },
+ tx
+ );
+
+ await externalCertificateAuthorityDAL.create(
+ {
+ certificateAuthorityId: ca.id,
+ dnsAppConnectionId,
+ type: CaType.ACME,
+ name,
+ projectId,
+ configuration: {
+ directoryUrl,
+ accountEmail,
+ dnsProvider: dnsProviderConfig.provider,
+ hostedZoneId: dnsProviderConfig.hostedZoneId
+ },
+ status
+ },
+ tx
+ );
+
+ return await certificateAuthorityDAL.findByIdWithAssociatedCa(ca.id, tx);
+ } catch (error) {
+ // @ts-expect-error We're expecting a database error
+ // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
+ if (error?.error?.code === "23505") {
+ throw new BadRequestError({
+ message: "Certificate authority with the same name already exists in your project"
+ });
+ }
+ throw error;
+ }
+ });
+
+ if (!caEntity.externalCa?.id) {
+ throw new BadRequestError({ message: "Failed to create external certificate authority" });
+ }
+
+ return castDbEntryToAcmeCertificateAuthority(caEntity);
+ };
+
+ const updateCertificateAuthority = async ({
+ id,
+ status,
+ configuration,
+ disableDirectIssuance,
+ actor,
+ name
+ }: {
+ id: string;
+ status?: CaStatus;
+ configuration: TUpdateAcmeCertificateAuthorityDTO["configuration"];
+ disableDirectIssuance?: boolean;
+ actor: OrgServiceActor;
+ name?: string;
+ }) => {
+ const updatedCa = await certificateAuthorityDAL.transaction(async (tx) => {
+ if (configuration) {
+ const { dnsAppConnectionId, directoryUrl, accountEmail, dnsProviderConfig } = configuration;
+ const appConnection = await appConnectionDAL.findById(dnsAppConnectionId);
+
+ if (!appConnection) {
+ throw new NotFoundError({ message: `App connection with ID '${dnsAppConnectionId}' not found` });
+ }
+
+ if (dnsProviderConfig.provider === AcmeDnsProvider.Route53 && appConnection.app !== AppConnection.AWS) {
+ throw new BadRequestError({
+ message: `App connection with ID '${dnsAppConnectionId}' is not an AWS connection`
+ });
+ }
+
+ // validates permission to connect
+ await appConnectionService.connectAppConnectionById(
+ appConnection.app as AppConnection,
+ dnsAppConnectionId,
+ actor
+ );
+
+ await externalCertificateAuthorityDAL.update(
+ {
+ certificateAuthorityId: id,
+ type: CaType.ACME
+ },
+ {
+ dnsAppConnectionId,
+ configuration: {
+ directoryUrl,
+ accountEmail,
+ dnsProvider: dnsProviderConfig.provider,
+ hostedZoneId: dnsProviderConfig.hostedZoneId
+ }
+ },
+ tx
+ );
+ }
+
+ await externalCertificateAuthorityDAL.update(
+ {
+ certificateAuthorityId: id,
+ type: CaType.ACME
+ },
+ {
+ name,
+ status
+ },
+ tx
+ );
+
+ if (disableDirectIssuance !== undefined) {
+ await certificateAuthorityDAL.updateById(
+ id,
+ {
+ disableDirectIssuance
+ },
+ tx
+ );
+ }
+
+ return certificateAuthorityDAL.findByIdWithAssociatedCa(id, tx);
+ });
+
+ if (!updatedCa.externalCa?.id) {
+ throw new BadRequestError({ message: "Failed to update external certificate authority" });
+ }
+
+ return castDbEntryToAcmeCertificateAuthority(updatedCa);
+ };
+
+ const listCertificateAuthorities = async ({ projectId }: { projectId: string }) => {
+ const cas = await certificateAuthorityDAL.findWithAssociatedCa({
+ [`${TableName.CertificateAuthority}.projectId` as "projectId"]: projectId,
+ [`${TableName.ExternalCertificateAuthority}.type` as "type"]: CaType.ACME
+ });
+
+ return cas.map(castDbEntryToAcmeCertificateAuthority);
+ };
+
+ const orderSubscriberCertificate = async (subscriberId: string) => {
+ const subscriber = await pkiSubscriberDAL.findById(subscriberId);
+ if (!subscriber.caId) {
+ throw new BadRequestError({ message: "Subscriber does not have a CA" });
+ }
+
+ const ca = await certificateAuthorityDAL.findByIdWithAssociatedCa(subscriber.caId);
+ if (!ca.externalCa || ca.externalCa.type !== CaType.ACME) {
+ throw new BadRequestError({ message: "CA is not an ACME CA" });
+ }
+
+ const acmeCa = castDbEntryToAcmeCertificateAuthority(ca);
+ if (acmeCa.status !== CaStatus.ACTIVE) {
+ throw new BadRequestError({ message: "CA is disabled" });
+ }
+
+ const certificateManagerKmsId = await getProjectKmsCertificateKeyId({
+ projectId: ca.projectId,
+ projectDAL,
+ kmsService
+ });
+
+ const kmsEncryptor = await kmsService.encryptWithKmsKey({
+ kmsId: certificateManagerKmsId
+ });
+
+ const kmsDecryptor = await kmsService.decryptWithKmsKey({
+ kmsId: certificateManagerKmsId
+ });
+
+ let accountKey: Buffer | undefined;
+ if (acmeCa.credentials) {
+ const decryptedCredentials = await kmsDecryptor({
+ cipherTextBlob: acmeCa.credentials as Buffer
+ });
+
+ const parsedCredentials = await AcmeCertificateAuthorityCredentialsSchema.parseAsync(
+ JSON.parse(decryptedCredentials.toString("utf8"))
+ );
+
+ accountKey = Buffer.from(parsedCredentials.accountKey, "base64");
+ }
+ if (!accountKey) {
+ accountKey = await acme.crypto.createPrivateRsaKey();
+ const newCredentials = {
+ accountKey: accountKey.toString("base64")
+ };
+ const { cipherTextBlob: encryptedNewCredentials } = await kmsEncryptor({
+ plainText: Buffer.from(JSON.stringify(newCredentials))
+ });
+ await externalCertificateAuthorityDAL.update(
+ {
+ certificateAuthorityId: acmeCa.id
+ },
+ {
+ credentials: encryptedNewCredentials
+ }
+ );
greptile
logic: Missing transaction parameter in the externalCertificateAuthorityDAL.update call, which could lead to an inconsistent state.
suggested fix
await externalCertificateAuthorityDAL.update(
  {
    certificateAuthorityId: acmeCa.id
  },
  {
    credentials: encryptedNewCredentials
  },
  tx
);
diff block
duration,
values.isPrimaryMetricSignificant(0)
)
- actions.closeConclusionModal()
+ actions.closeStopExperimentModal()
greptile
logic: closeStopExperimentModal is called, but the endExperiment action is not cleaned up in the modal's close handler, which could leave the experiment in an inconsistent state if the modal is closed without completing the action
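One general way to avoid this is to have the close action also reset whatever pending "ending" state the modal may have started. A minimal kea-style sketch; the action and reducer names here are assumptions, not PostHog's actual experiment logic:

import { actions, kea, listeners, reducers } from 'kea'

// Hypothetical logic illustrating the cleanup pattern described above.
export const stopExperimentModalLogic = kea([
    actions({
        openStopExperimentModal: true,
        closeStopExperimentModal: true,
        endExperiment: true,
        resetEndExperiment: true,
    }),
    reducers({
        isStopExperimentModalOpen: [
            false,
            {
                openStopExperimentModal: () => true,
                closeStopExperimentModal: () => false,
            },
        ],
        isEndingExperiment: [
            false,
            {
                endExperiment: () => true,
                resetEndExperiment: () => false,
            },
        ],
    }),
    listeners(({ actions }) => ({
        // Closing the modal clears any pending end-experiment state, so an
        // abandoned modal cannot leave the experiment half-stopped.
        closeStopExperimentModal: () => {
            actions.resetEndExperiment()
        },
    })),
])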
diff block
+use chirp_workflow::prelude::*;
+use rivet_operation::prelude::proto::backend::pkg::*;
+
+use lazy_static::lazy_static;
+use futures_util::{FutureExt, StreamExt, TryStreamExt};
+use rand::{seq::IteratorRandom, Rng};
+use serde_json::json;
+
+lazy_static! {
+ // Load adjectives from file
+ static ref ADJECTIVES: Vec<&'static str> = include_str!("../../../adjectives.txt")
+ .split('\n')
+ .filter(|l| !l.is_empty())
+ .map(|l| l.trim())
+ .collect();
+}
+
+const UPLOAD_BATCH_SIZE: usize = 256;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Input {
+ pub user_id: Uuid,
+ pub display_name: Option<String>,
+}
+
+#[workflow]
+pub async fn user(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> {
+ let (display_name, _account_number) = ctx.activity(InsertDbInput {
+ user_id: input.user_id,
+ display_name: input.display_name.clone(),
+ }).await?;
+
+ ctx.msg(CreateComplete {})
+ .tag("user_id", input.user_id)
+ .send()
+ .await?;
+
+ ctx.activity(PublishCreationAnalyticsInput {
+ user_id: input.user_id,
+ display_name,
+ }).await?;
+
+ ctx.repeat(|ctx| {
+ let user_id = input.user_id;
+
+ async move {
+ match ctx.listen::<Main>().await? {
+ Main::AdminSet(_) => {
+ ctx.activity(AdminSetInput {
+ user_id
+ }).await?;
+
+ ctx.msg(Update {})
+ .tag("user_id", user_id)
+ .send()
+ .await?;
+ },
+ Main::Delete(_) => {
+ return Ok(Loop::Break(()));
+ },
+ }
+
+ Ok(Loop::Continue)
+ }
+ .boxed()
+ }).await?;
+
+ ctx.activity(DeleteIdentitiesInput {
+ user_id: input.user_id,
+ }).await?;
+
+ ctx.activity(DeleteUploadsInput {
+ user_id: input.user_id,
+ }).await?;
+
+ ctx.activity(RemoveFromTeamsInput {
+ user_id: input.user_id,
+ }).await?;
+
+ ctx.msg(DeleteComplete {})
+ .tag("user_id", input.user_id)
+ .send()
+ .await?;
+
+ ctx.msg(Update {})
+ .tag("user_id", input.user_id)
+ .send()
+ .await?;
+
+ ctx.activity(PublishDeletionAnalyticsInput {
+ user_id: input.user_id,
+ }).await?;
greptile
logic: deletion activities are executed even if RedactUserRecord is not called, which could leave the data in an inconsistent state
diff block
return workflowRunRepository.update(workflowRunToUpdate.id, {
status,
- output,
endedAt: new Date().toISOString(),
+ output: {
+ ...(workflowRunToUpdate.output ?? {}),
+ error,
+ },
+ });
+ }
+
+ async saveWorkflowRunState({
+ workflowRunId,
+ output,
+ stepIndex,
+ context,
+ }: {
+ workflowRunId: string;
+ output: WorkflowRunOutput;
+ stepIndex: number;
+ context: Record<string, any>;
+ }) {
+ const workflowRunRepository =
+ await this.twentyORMManager.getRepository<WorkflowRunWorkspaceEntity>(
+ 'workflowRun',
+ );
+
+ const workflowRunToUpdate = await workflowRunRepository.findOneBy({
+ id: workflowRunId,
+ });
+
+ if (!workflowRunToUpdate) {
+ throw new WorkflowRunException(
+ 'No workflow run to end',
+ WorkflowRunExceptionCode.WORKFLOW_RUN_NOT_FOUND,
+ );
+ }
+
+ return workflowRunRepository.update(workflowRunId, {
+ output,
+ stepIndex,
+ context,
});
greptile
logic: No validation that the workflow is in the RUNNING state before updating. This could lead to an inconsistent state if called on completed or failed workflows
suggested fix
+  if (workflowRunToUpdate.status !== WorkflowRunStatus.RUNNING) {
     throw new WorkflowRunException(
+      'Workflow state cannot be updated as it is not running',
+      WorkflowRunExceptionCode.INVALID_OPERATION,
     );
   }
   return workflowRunRepository.update(workflowRunId, {
     output,
     stepIndex,
     context,
   });
diff block
-from typing import Union
-from collections.abc import Callable
+import logging
from infi.clickhouse_orm import migrations
-from posthog.clickhouse.client.execute import sync_execute
+from posthog.clickhouse.client.connection import NodeRole
+from posthog.clickhouse.cluster import get_cluster
+from posthog.settings.data_stores import CLICKHOUSE_MIGRATIONS_CLUSTER
+logger = logging.getLogger("migrations")
-def run_sql_with_exceptions(sql: Union[str, Callable[[], str]], settings=None):
+
+def run_sql_with_exceptions(sql: str, settings=None, node_role: NodeRole = NodeRole.WORKER):
"""
migrations.RunSQL does not raise exceptions, so we need to wrap it in a function that does.
+ node_role is set to WORKER by default to keep compatibility with the old migrations.
"""
- if settings is None:
- settings = {}
+ cluster = get_cluster(client_settings=settings, cluster=CLICKHOUSE_MIGRATIONS_CLUSTER)
- def run_sql(database):
- nonlocal sql
- if callable(sql):
- sql = sql()
- sync_execute(sql, settings=settings)
+ def run_migration():
+ if node_role == NodeRole.ALL:
+ logger.info(" Running migration on coordinators and workers")
+ return cluster.map_all_hosts(lambda client: client.execute(sql)).result()
greptile
logic: No error handling for failed migrations across nodes, which could leave the cluster in an inconsistent state
diff block
+import { useAuth } from '@/auth/hooks/useAuth';
+import { isAppWaitingForFreshObjectMetadataState } from '@/object-metadata/states/isAppWaitingForFreshObjectMetadataState';
+import { useSetRecoilState } from 'recoil';
+
+export const useImpersonationAuth = () => {
+ const { getAuthTokensFromLoginToken } = useAuth();
+ const setIsAppWaitingForFreshObjectMetadata = useSetRecoilState(
+ isAppWaitingForFreshObjectMetadataState,
+ );
+
+ const executeImpersonationAuth = async (loginToken: string) => {
+ setIsAppWaitingForFreshObjectMetadata(true);
+ await getAuthTokensFromLoginToken(loginToken);
+ setIsAppWaitingForFreshObjectMetadata(false);
+ };
greptile
logic: missing try/catch block around the auth token fetch, which could leave the app in an inconsistent state if auth fails
suggested fix
  const executeImpersonationAuth = async (loginToken: string) => {
    setIsAppWaitingForFreshObjectMetadata(true);
+   try {
      await getAuthTokensFromLoginToken(loginToken);
+   } catch (error) {
      setIsAppWaitingForFreshObjectMetadata(false);
+     throw error;
+   }
    setIsAppWaitingForFreshObjectMetadata(false);
  };
diff block
</div>
)}
- {taxonomicGroupTypes.map((groupType) => {
- return (
- <div key={groupType} className={clsx(groupType === openTab ? 'block' : 'hidden')}>
- <BindLogic
- logic={infiniteListLogic}
- props={{ ...taxonomicFilterLogicProps, listGroupType: groupType }}
- >
- {showEmptyState && <TaxonomicFilterEmptyState groupType={groupType} />}
- {!showEmptyState && listComponent}
- </BindLogic>
- </div>
- )
- })}
- </>
+ <div className={cn('flex-1 overflow-hidden min-h-0')}>
+ {taxonomicGroupTypes.map((groupType) => {
+ return (
+ <div key={groupType} className={cn(groupType === openTab ? 'flex flex-col h-full' : 'hidden')}>
+ <BindLogic
+ logic={infiniteListLogic}
+ props={{ ...taxonomicFilterLogicProps, listGroupType: groupType }}
+ >
+ {showEmptyState && <TaxonomicFilterEmptyState groupType={groupType} />}
+ {!showEmptyState && listComponent}
greptile
logic: showEmptyState condition uses openTab but listComponent uses activeTab, which could lead to inconsistent states
diff block
+#
+# This module overrides distutils (also compatible with setuptools) "sdist"
+# command to perform pre- and post-processing as required for MicroPython's
+# upip package manager.
+#
+# Preprocessing steps:
+# * Creation of Python resource module (R.py) from each top-level package's
+# resources.
+# Postprocessing steps:
+# * Removing metadata files not used by upip (this includes setup.py)
+# * Recompressing gzip archive with 4K dictionary size so it can be
+# installed even on low-heap targets.
+#
+import sys
+import os
+import zlib
+from subprocess import Popen, PIPE
+import glob
+import tarfile
+import re
+import io
+
+from distutils.filelist import FileList
+from setuptools.command.sdist import sdist as _sdist
+
+
+def gzip_4k(inf, fname):
+ comp = zlib.compressobj(level=9, wbits=16 + 12)
+ with open(fname + ".out", "wb") as outf:
+ while 1:
+ data = inf.read(1024)
+ if not data:
+ break
+ outf.write(comp.compress(data))
+ outf.write(comp.flush())
+ os.rename(fname, fname + ".orig")
+ os.rename(fname + ".out", fname)
+
+
+FILTERS = [
+ # include, exclude, repeat
+ (r".+\.egg-info/(PKG-INFO|requires\.txt)", r"setup.py$"),
+ (r".+\.py$", r"[^/]+$"),
+ (None, r".+\.egg-info/.+"),
+]
+
+
+outbuf = io.BytesIO()
+
+def filter_tar(name):
+ fin = tarfile.open(name, "r:gz")
+ fout = tarfile.open(fileobj=outbuf, mode="w")
+ for info in fin:
+# print(info)
+ if not "/" in info.name:
+ continue
+ fname = info.name.split("/", 1)[1]
+ include = None
+
+ for inc_re, exc_re in FILTERS:
+ if include is None and inc_re:
+ if re.match(inc_re, fname):
+ include = True
+
+ if include is None and exc_re:
+ if re.match(exc_re, fname):
+ include = False
+
+ if include is None:
+ include = True
+
+ if include:
+ print("including:", fname)
+ else:
+ print("excluding:", fname)
+ continue
+
+ farch = fin.extractfile(info)
+ fout.addfile(info, farch)
+ fout.close()
+ fin.close()
+
+
+def make_resource_module(manifest_files):
+ resources = []
+ # Any non-python file included in manifest is resource
+ for fname in manifest_files:
+ ext = fname.rsplit(".", 1)[1]
+ if ext != "py":
+ resources.append(fname)
+
+ if resources:
+ print("creating resource module R.py")
+ resources.sort()
+ last_pkg = None
+ r_file = None
+ for fname in resources:
+ try:
+ pkg, res_name = fname.split("/", 1)
+ except ValueError:
+ print("not treating %s as a resource" % fname)
+ continue
+ if last_pkg != pkg:
+ last_pkg = pkg
+ if r_file:
+ r_file.write("}\n")
+ r_file.close()
+ r_file = open(pkg + "/R.py", "w")
+ r_file.write("R = {\n")
greptile
style: no error handling for file operations, which could leave R.py in an inconsistent state
diff block
const CLIEngine = require('eslint').CLIEngine;
const listChangedFiles = require('../shared/listChangedFiles');
-const allPaths = ['**/*.js'];
+const allPaths = ['**/*.js', '**/*.jsx', '**/*.ts', '**/*.tsx'];
let changedFiles = null;
+let eslintCache = new Map();
-function runESLintOnFilesWithOptions(filePatterns, onlyChanged, options) {
- const cli = new CLIEngine(options);
- const formatter = cli.getFormatter();
+async function runESLintOnFilesWithOptions(filePatterns, onlyChanged, options = {}) {
+ const defaultOptions = {
+ cache: true,
+ cacheLocation: '.eslintcache',
+ fix: false,
+ maxWarnings: 100,
+ ...options
+ };
+
+ const cli = new CLIEngine(defaultOptions);
+ const formatter = cli.getFormatter('stylish');
if (onlyChanged && changedFiles === null) {
- // Calculate lazily.
- changedFiles = [...listChangedFiles()];
+ try {
+ changedFiles = [...await listChangedFiles()];
+ changedFiles.forEach(file => {
+ if (!eslintCache.has(file)) {
+ eslintCache.set(file, null);
+ }
+ });
+ } catch (error) {
+ console.error('Error getting changed files:', error);
+ throw error;
+ }
}
greptile
logic: The cache is populated even if there's an error getting changed files, which could lead to inconsistent state. Consider moving the cache population after the try-catch block.
suggested fix
+  if (onlyChanged && changedFiles === null) {
     try {
       changedFiles = [...await listChangedFiles()];
     } catch (error) {
       console.error('Error getting changed files:', error);
       throw error;
     }
     changedFiles.forEach(file => {
       if (!eslintCache.has(file)) {
         eslintCache.set(file, null);
       }
     });
   }
diff block
)
)
db_session.commit()
+
+
+class CreateUserRequest(BaseModel):
+ email: str
+ password: str
+ role: UserRole
+
+
+@router.post("/manage/admin/create-user")
+async def create_user(
+ user_request: CreateUserRequest,
+ current_user: User | None = Depends(current_admin_user),
+ db_session: AsyncSession = Depends(get_async_session),
+) -> dict:
+ """Create a new user with the specified role."""
+ try:
+ # Check if user already exists
+ existing_user = await db_session.execute(
+ select(User).where(User.email == user_request.email)
+ )
+ existing_user = existing_user.scalar_one_or_none()
+
+ if existing_user:
+ raise HTTPException(
+ status_code=400, detail=f"User with email {user_request.email} already exists"
+ )
+
+ # Create the user
+ user = await create_user_async(
+ db=db_session,
+ email=user_request.email,
+ role=user_request.role,
+ is_active=True,
+ )
+
+ # Set the password
+ user.set_password(user_request.password)
+ await db_session.commit()
greptile
logic: There's a potential race condition here. If the commit after set_password fails, you'll have an inconsistent state. Consider moving the password setting before the first commit.
suggested fix
+    # Set password before committing to ensure atomic operation
     user.set_password(user_request.password)
+    db_session.add(user)
     await db_session.commit()
diff block
+import type { CollectionUpdateCollection } from '@/api/buster_socket/collections';
+import { useSocketQueryMutation } from '@/api/buster_socket_query';
+import { useMemoizedFn } from 'ahooks';
+import { BusterCollection, BusterCollectionListItem } from '@/api/asset_interfaces';
+import { queryKeys } from '@/api/query_keys';
+import { useQueryClient } from '@tanstack/react-query';
+
+export const useCollectionUpdate = () => {
+ const queryClient = useQueryClient();
+
+ const { mutateAsync: updateCollection, isPending: isUpdatingCollection } = useSocketQueryMutation(
+ '/collections/update',
+ '/collections/update:collectionState',
+ null,
+ (_, variables) => {
+ const collectionId = variables.id!;
+ const collectionOptions = queryKeys['/collections/get:collectionState'](collectionId);
+ const queryKey = collectionOptions.queryKey;
+ const collection = queryClient.getQueryData(queryKey);
+ if (collection) {
+ const newCollection: BusterCollection = {
+ ...collection!,
+ ...(variables as Partial<BusterCollection>)
+ };
+ queryClient.setQueryData(queryKey, newCollection);
+ }
greptile
style: optimistic update performed without error handling, which could leave the cache in an inconsistent state if the mutation fails
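For comparison, the standard TanStack Query pattern for guarding an optimistic update is to snapshot the cache in onMutate and restore it in onError. A minimal sketch using a plain useMutation rather than the custom useSocketQueryMutation wrapper above; the endpoint URL and Collection shape are placeholders, not this codebase's real API:

import { useMutation, useQueryClient } from '@tanstack/react-query';

// Hypothetical shape standing in for the real collection types.
type Collection = { id: string; name: string };

export const useUpdateCollectionOptimistic = () => {
  const queryClient = useQueryClient();

  return useMutation({
    mutationFn: (update: Partial<Collection> & { id: string }) =>
      fetch(`/api/collections/${update.id}`, {
        method: 'PATCH',
        body: JSON.stringify(update)
      }).then((res) => res.json()),
    onMutate: async (update) => {
      const queryKey = ['collections', update.id];
      // Stop in-flight refetches from overwriting the optimistic value.
      await queryClient.cancelQueries({ queryKey });
      // Snapshot the current cache entry so it can be restored on failure.
      const previous = queryClient.getQueryData<Collection>(queryKey);
      queryClient.setQueryData<Collection>(queryKey, (old) =>
        old ? { ...old, ...update } : old
      );
      return { previous, queryKey };
    },
    onError: (_err, _update, context) => {
      // Roll the cache back so a failed mutation never leaves stale optimistic data.
      if (context?.previous) {
        queryClient.setQueryData(context.queryKey, context.previous);
      }
    },
    onSettled: (_data, _err, update) => {
      // Re-sync with the server either way.
      queryClient.invalidateQueries({ queryKey: ['collections', update.id] });
    }
  });
};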
diff block
const CLIEngine = require('eslint').CLIEngine;
const listChangedFiles = require('../shared/listChangedFiles');
-const allPaths = ['**/*.js'];
+const allPaths = ['**/*.js', '**/*.jsx', '**/*.ts', '**/*.tsx'];
let changedFiles = null;
+let eslintCache = new Map();
-function runESLintOnFilesWithOptions(filePatterns, onlyChanged, options) {
- const cli = new CLIEngine(options);
- const formatter = cli.getFormatter();
+async function runESLintOnFilesWithOptions(filePatterns, onlyChanged, options = {}) {
+ const defaultOptions = {
+ cache: true,
+ cacheLocation: '.eslintcache',
+ fix: false,
+ maxWarnings: 100,
+ ...options
+ };
+
+ const cli = new CLIEngine(defaultOptions);
+ const formatter = cli.getFormatter('stylish');
if (onlyChanged && changedFiles === null) {
- // Calculate lazily.
- changedFiles = [...listChangedFiles()];
+ try {
+ changedFiles = [...await listChangedFiles()];
+ changedFiles.forEach(file => {
+ if (!eslintCache.has(file)) {
+ eslintCache.set(file, null);
+ }
+ });
+ } catch (error) {
+ console.error('Error getting changed files:', error);
+ throw error;
+ }
}
greptile
logic: The error is caught and immediately rethrown without cleanup. This could leave changedFiles in an inconsistent state if an error occurs mid-population.
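A small sketch of the cleanup this comment asks for (the helper name is hypothetical): only assign changedFiles once the list has been fully materialized, and clear it again if the lookup throws so a retry starts clean.

let changedFiles: Set<string> | null = null;

async function getChangedFilesOnce(
  listChangedFiles: () => Promise<Iterable<string>>
): Promise<Set<string>> {
  if (changedFiles !== null) {
    return changedFiles;
  }
  try {
    // Materialize the full list before assigning, so a failure part-way
    // through can never leave a half-populated value behind.
    changedFiles = new Set(await listChangedFiles());
    return changedFiles;
  } catch (error) {
    // Reset so a later call retries from scratch instead of reusing
    // whatever partial state the failed attempt produced.
    changedFiles = null;
    throw error;
  }
}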
diff block
return Object.values(featureToOptions);
};
+
+export const checkStripeProductExists = async ({
+ sb,
+ org,
+ env,
+ product,
+}: {
+ sb: SupabaseClient;
+ org: Organization;
+ env: AppEnv;
+ product: FullProduct;
+}) => {
+ let createNew = false;
+ let stripeCli = createStripeCli({
+ org,
+ env,
+ });
+
+ if (!product.processor || !product.processor.id) {
+ createNew = true;
+ } else {
+ try {
+ await stripeCli.products.retrieve(product.processor!.id);
+ } catch (error) {
+ createNew = true;
+ }
+ }
+
+ if (createNew) {
+ console.log("Creating new product in Stripe");
+ const stripeProduct = await stripeCli.products.create({
+ name: product.name,
+ });
+
+ await ProductService.update({
+ sb,
+ internalId: product.internal_id,
+ update: {
+ processor: { id: stripeProduct.id, type: ProcessorType.Stripe },
+ },
+ });
+
greptile
logic: No error handling if ProductService.update fails. This could leave the system in an inconsistent state, with the Stripe product created but never recorded in the database
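A hedged sketch of one compensation strategy (the helper and its parameters are hypothetical, not this codebase's API): if persisting the processor ID fails, archive the Stripe product that was just created so the two systems do not drift apart.

import Stripe from "stripe";

// Hypothetical helper; `persistProcessorId` stands in for the ProductService.update
// call in the snippet above.
export const createStripeProductAtomically = async (
  stripe: Stripe,
  name: string,
  persistProcessorId: (stripeProductId: string) => Promise<void>
): Promise<Stripe.Product> => {
  const stripeProduct = await stripe.products.create({ name });
  try {
    await persistProcessorId(stripeProduct.id);
  } catch (error) {
    // The database never recorded this product, so archive it in Stripe
    // rather than leaving an orphaned, untracked product behind.
    await stripe.products.update(stripeProduct.id, { active: false });
    throw error;
  }
  return stripeProduct;
};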
diff block
+package com.egardia.sunrise.controller;
+
+import com.egardia.dto.account.AccountInformation;
+import com.egardia.sunrise.entity.SunriseBaseEntity.DeviceDetails;
+import com.egardia.sunrise.entity.SunriseLogin;
+import com.egardia.sunrise.entity.SunriseOrder;
+import com.egardia.sunrise.entity.SunriseOrderResponse;
+import com.egardia.sunrise.entity.SunriseOrderResponse.Status;
+import com.egardia.sunrise.entity.SunriseOrderTypes.ActivationOrder;
+import com.egardia.sunrise.entity.SunriseOrderTypes.CeaseOrder;
+import com.egardia.sunrise.entity.SunriseOrderTypes.ModifyOrder;
+import com.egardia.sunrise.entity.SunriseOrderTypes.SuspendOrder;
+import com.egardia.sunrise.entity.SunriseSite;
+import com.egardia.sunrise.entity.SunriseSite.AccountState;
+import com.egardia.sunrise.exception.InvalidOrderException;
+import com.egardia.sunrise.repository.SunriseLoginRepository;
+import com.egardia.sunrise.repository.SunriseOrderRepository;
+import com.egardia.sunrise.repository.SunriseSiteRepository;
+import com.egardia.sunrise.service.EgardiaAccountService;
+import com.egardia.sunrise.service.EgardiaAccountService.HomeUserRel;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import jakarta.transaction.Transactional;
+import jakarta.validation.ConstraintViolation;
+import jakarta.validation.Validator;
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.client.HttpClientErrorException;
+
+@Slf4j
+@Transactional
+@RestController
+@RequestMapping("/v1/order")
+public class SunriseOrderController {
+
+ private static final String EMAIL_IN_USE = "Email already in use";
+
+ @Autowired
+ private SunriseOrderRepository sunriseOrderRepository;
+ @Autowired
+ private SunriseSiteRepository sunriseSiteRepository;
+ @Autowired
+ private SunriseLoginRepository sunriseLoginRepository;
+ @Autowired
+ private ObjectMapper objectMapper;
+ @Autowired
+ private Validator validator;
+
+ @Autowired
+ private EgardiaAccountService egardiaAccountService;
+
+ @GetMapping()
+ public String test() {
+ System.out.println("test endpoint");
+ return "orders endpoint ok";
+ }
+
+ @Transactional
+ @PostMapping()
+ public ResponseEntity<SunriseOrderResponse> newOrder(@RequestBody SunriseOrder order)
+ throws JsonProcessingException {
+ SunriseOrder.OrderType orderType =
+ order.getOrderDetails() != null ? order.getOrderDetails().getOrderType() : null;
+
+ Set<ConstraintViolation<SunriseOrder>> violations;
+ switch (orderType) {
+ case ACTIVATION -> violations = validator.validate(order, ActivationOrder.class);
+ case MODIFY, DOWNGRADE -> violations = validator.validate(order, ModifyOrder.class);
+ case SUSPEND -> violations = validator.validate(order, SuspendOrder.class);
+ case CEASE -> violations = validator.validate(order, CeaseOrder.class);
+ case null, default -> {
+ throw new InvalidOrderException(order, Status.ERROR_INVALID_ORDER_TYPE,
+ "Validation failed: Invalid value for field orderDetails.orderType. Allowed values are: ACTIVATION, MODIFY, SUSPEND, CEASE");
+ }
+ }
+
+ if (!violations.isEmpty()) {
+ String errorMessage = violations.stream()
+ .map(violation -> violation.getPropertyPath() + ": " + violation.getMessage())
+ .collect(Collectors.joining(", "));
+
+ throw new InvalidOrderException(order, Status.ERROR_VALIDATION_FAILED,
+ "Validation failed: [" + errorMessage + "]");
+ }
+
+ log.info("Received order: " + order);
+ SunriseOrderResponse res = new SunriseOrderResponse(order);
+
+ SunriseOrder existingOrder = sunriseOrderRepository.findByOrderIdForWrite(
+ order.getOrderId()).orElse(null);
+ if (existingOrder != null) {
+ throw new InvalidOrderException(order, Status.ERROR_DUPLICATE_ORDER,
+ "An order with this ID has already been handled");
+ }
+
+ SunriseSite site = sunriseSiteRepository.findBySiteIdForWrite(order.getSiteId())
+ .orElse(null);
+
+ boolean isActiveSite =
+ site != null && site.getAccountState() == SunriseSite.AccountState.ACTIVE;
+ boolean isActivatable =
+ site == null
+ || site.getAccountState() == SunriseSite.AccountState.CEASED
+ || site.getAccountState() == SunriseSite.AccountState.SUSPENDED;
+
+ if (!isActiveSite && orderType != SunriseOrder.OrderType.ACTIVATION) {
+ throw new InvalidOrderException(order, Status.ERROR_MISSING_SITE,
+ "No active site found for the siteId provided in this order.");
+ } else if (!isActivatable && orderType == SunriseOrder.OrderType.ACTIVATION) {
+ throw new InvalidOrderException(order, Status.ERROR_ALREADY_PROVISIONED,
+ "A site with this siteId is already provisioned.");
+ }
+
+ return switch (orderType) {
+ case ACTIVATION -> handleActivationOrder(order, res);
+ case MODIFY, DOWNGRADE -> handleModificationOrder(order, site, res);
+ case SUSPEND -> handleSuspendOrder(order, site, res);
+ case CEASE -> handleCeaseOrder(order, site, res);
+ };
+ }
+
+ private RegistrationInfo getRegistrationInfo(SunriseOrder order) {
+ AccountInformation accountInformation;
+ String gatewayCommonName;
+ try {
+ accountInformation = order.getAccountInformation();
+ gatewayCommonName = order.getGatewayCommonName();
+ } catch (IllegalArgumentException e) {
+ Status status = Status.valueOf(e.getMessage());
+
+ switch (status) {
+ case ERROR_MISSING_GATEWAY:
+ throw new InvalidOrderException(order, status,
+ "No accepted gateway found among the devices supplied in the order");
+ case ERROR_MULTIPLE_GATEWAYS:
+ throw new InvalidOrderException(order, status,
+ "Multiple gateways found among the devices supplied in the order");
+ default:
+ throw e;
+ }
+ } catch (Exception e) {
+ throw new InvalidOrderException(order, Status.ERROR,
+ "Error occurred while processing the order");
+ }
+ return new RegistrationInfo(accountInformation, gatewayCommonName);
+ }
+
+ private void checkExistingDevices(SunriseOrder order) {
+ checkExistingDevices(order, null);
+ }
+
+ private void checkExistingDevices(SunriseOrder order, SunriseSite site) {
+ List<String> deviceSerialNumbers = order.getDeviceDetails().stream()
+ .map(DeviceDetails::getDeviceSerialNumber)
+ .toList();
+
+ List<SunriseSite> sitesWithExistingDevice = sunriseSiteRepository.findByDeviceSerialNumbers(
+ deviceSerialNumbers);
+ // Filter out the site that is being modified
+ if (site != null) {
+ sitesWithExistingDevice = sitesWithExistingDevice.stream()
+ .filter(existingSite -> !existingSite.getSiteId().equals(site.getSiteId()))
+ .toList();
+ }
+ // Get the first site that has a matching device
+ SunriseSite siteWithExistingDevice = sitesWithExistingDevice.stream()
+ .findFirst()
+ .orElse(null);
+ // Find device that matches the serial number
+ if (siteWithExistingDevice != null) {
+ List<String> existingDeviceSerialNumbers = siteWithExistingDevice.getDeviceDetails()
+ .stream()
+ .map(DeviceDetails::getDeviceSerialNumber)
+ .toList();
+ String matchingSerial = deviceSerialNumbers.stream()
+ .filter(existingDeviceSerialNumbers::contains)
+ .findFirst()
+ .orElseThrow();
+ throw new InvalidOrderException(order, Status.ERROR_DEVICE_ALREADY_IN_USE,
+ "Device with serial number " + matchingSerial + " is already in use");
+ }
+ }
+
+ /**
+ * Handles activation order Checks the following things:
+ * - If the device serial numbers are already in use
+ * - If the gateway is missing or multiple gateways are found
+ *
+ * @param order The order to handle
+ * @param res The response object to update
+ * @return ResponseEntity<SunriseOrderResponse> The response object
+ */
+ private ResponseEntity<SunriseOrderResponse> handleActivationOrder(SunriseOrder order,
+ SunriseOrderResponse res) {
+
+ checkExistingDevices(order);
+
+ RegistrationInfo registrationInfo = getRegistrationInfo(order);
+ AccountInformation accountInformation = registrationInfo.accountInformation;
+ String gatewayCommonName = registrationInfo.gatewayCommonName;
+
+ // Store the randomly generated password (encrypted) so we can use the same password to get the token later
+ SunriseLogin login = new SunriseLogin(order.getSiteId(), accountInformation.getPassword());
+
+ // Once we know everything is in working order, we can save the order and login
+ sunriseOrderRepository.save(order);
+ sunriseLoginRepository.save(login);
+
+ HomeUserRel homeUserRel;
+ try {
+ homeUserRel = egardiaAccountService.createAccount(gatewayCommonName,
+ accountInformation);
+ } catch (HttpClientErrorException e) {
+ if (e.getStatusCode() != HttpStatus.CONFLICT) {
+ throw e;
+ }
+
+ if (e.getResponseBodyAsString().contains(EMAIL_IN_USE)) {
+ throw new InvalidOrderException(order, Status.ERROR_EMAIL_ALREADY_EXISTS,
+ "Email already in use");
+ } else {
+ throw new InvalidOrderException(order, Status.ERROR,
+ "Error occurred while processing the order. There might be an issue with the site having been registered previously");
+ }
+ } catch (Exception e) {
+ throw new InvalidOrderException(order, Status.ERROR,
+ "Error occurred while processing the order");
+ }
+
+ log.info("Egardia account created with userId: " + homeUserRel.userId()
+ + " and homeId: " + homeUserRel.homeId());
+
+ // Create the site entity from the activation order
+ SunriseSite newSite = new SunriseSite(order);
+
+ newSite.setEgardiaAccountDetails(
+ new SunriseSite.EgardiaAccountDetails(homeUserRel.userId(), homeUserRel.homeId()));
+ sunriseSiteRepository.save(newSite);
+
+ res.setDeviceDetails(newSite);
+
+ return new ResponseEntity<>(res, HttpStatus.CREATED);
+ }
+
+ /**
+ * Handles modification order
+ *
+ * @param order The order to handle
+ * @param res The response object to update
+ * @return ResponseEntity<SunriseOrderResponse> The response object
+ */
+ private ResponseEntity<SunriseOrderResponse> handleModificationOrder(SunriseOrder order,
+ SunriseSite site,
+ SunriseOrderResponse res) {
+ int homeId = site.getEgardiaAccountDetails().getHomeId();
+ int userId = site.getEgardiaAccountDetails().getUserId();
+
+ List<Runnable> rollbackActions = new ArrayList<>();
+
+ boolean isProvisioned =
+ site.getProvisioningState() == SunriseSite.ProvisioningState.PROVISIONED;
+
+ try {
+ // First step for Updating personal details
+ updateWithRollback(
+ site.getBasicUserInformation(),
+ order.getBasicUserInformation(),
+ newValue -> egardiaAccountService.updateAccountBasicInformation(homeId, userId,
+ newValue),
+ oldValue -> egardiaAccountService.updateAccountBasicInformation(homeId, userId,
+ oldValue),
+ rollbackActions
+ );
+
+ // First step for location updating: the home
+ updateWithRollback(
+ site.getHomeDetails(homeId),
+ order.getHomeDetails(homeId),
+ newValue -> egardiaAccountService.updateHomeDetails(homeId, newValue),
+ oldValue -> egardiaAccountService.updateHomeDetails(homeId, oldValue),
+ rollbackActions
+ );
+
+ // Second step for location updating: the gateway
+ if (isProvisioned) {
+ updateWithRollback(
+ site.getCpgLocationInfo(),
+ order.getCpgLocationInfo(),
+ newValue -> egardiaAccountService.updateCpgLocationInfo(homeId, newValue),
+ oldValue -> egardiaAccountService.updateCpgLocationInfo(homeId, oldValue),
+ rollbackActions
+ );
+ }
+
+ // Note: Due to miscommunication with Sunrise, we may get an empty list of devices
+ // in the modify order. In this case, we should not update the device details
+ // TODO: After release, we should check for an empty list of devices
+ if (!order.getDeviceDetails().isEmpty()) {
+ checkExistingDevices(order, site);
+ getRegistrationInfo(order);
+ }
+
+ List<String> changes = site.updateWithOrder(order);
+ if (!changes.isEmpty()) {
+ log.info("Site updated with changes: " + changes);
+ }
+ sunriseSiteRepository.save(site);
+
+ // Set the completed date to now
+ order.setCompletedDate(OffsetDateTime.now());
+ sunriseOrderRepository.save(order);
+
+ res.setDeviceDetails(site);
+
+ return new ResponseEntity<>(res, HttpStatus.OK);
+
+ } catch (Exception e) {
+ rollbackActions.forEach(Runnable::run);
+ throw new RuntimeException("Failed to process updates. Changes rolled back.", e);
+ }
+ }
+
+ private <T> void updateWithRollback(
+ T oldValue,
+ T newValue,
+ Consumer<T> updateAction,
+ Consumer<T> rollbackAction,
+ List<Runnable> rollbackActions
+ ) throws JsonProcessingException {
+ String oldValueJson = objectMapper.writeValueAsString(oldValue);
+ String newValueJson = objectMapper.writeValueAsString(newValue);
+
+ if (!oldValueJson.equals(newValueJson)) {
+ rollbackActions.add(() -> rollbackAction.accept(oldValue));
+ updateAction.accept(newValue);
+ }
+ }
+
+ /**
+ * Handles suspend order
+ *
+ * @param order The order to handle
+ * @param res The response object to update
+ * @return ResponseEntity<SunriseOrderResponse> The response object
+ */
+ private ResponseEntity<SunriseOrderResponse> handleSuspendOrder(SunriseOrder order,
+ SunriseSite site,
+ SunriseOrderResponse res) {
+ // Currently this is exactly the same as the cease order
+ sunriseOrderRepository.save(order);
greptile
logic: saving the order before the downstream operations could lead to an inconsistent state if those operations fail
diff block
+package com.egardia.sunrise.service;
+
+import com.egardia.sunrise.entity.SunriseOrder;
+import com.egardia.sunrise.entity.SunriseOrderConfirmation;
+import com.egardia.sunrise.entity.SunriseSite;
+import com.egardia.sunrise.exception.NotFoundException;
+import com.egardia.sunrise.repository.SunriseOrderRepository;
+import com.egardia.sunrise.repository.SunriseSiteRepository;
+import com.egardia.sunrise.repository.SunriseTokenRepository;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import jakarta.transaction.Transactional;
+import java.time.OffsetDateTime;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.http.MediaType;
+import org.springframework.http.ResponseEntity;
+import org.springframework.jms.annotation.JmsListener;
+import org.springframework.security.oauth2.client.OAuth2AuthorizeRequest;
+import org.springframework.security.oauth2.client.OAuth2AuthorizedClient;
+import org.springframework.security.oauth2.client.OAuth2AuthorizedClientManager;
+import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository;
+import org.springframework.security.oauth2.core.OAuth2AccessToken;
+import org.springframework.stereotype.Service;
+import org.springframework.web.client.RestClient;
+import org.springframework.web.client.RestClientException;
+
+@Slf4j
+@Service
+public class SunriseOrderService {
+
+ @Autowired
+ private OAuth2AuthorizedClientManager authorizedClientServiceAndManager;
+ @Autowired
+ private ClientRegistrationRepository clientRegistrationRepository;
+ @Autowired
+ private SunriseOrderRepository sunriseOrderRepository;
+ @Autowired
+ private SunriseSiteRepository sunriseSiteRepository;
+ @Autowired
+ private SunriseTokenRepository sunriseTokenRepository;
+ @Autowired
+ private RestClient restClientEgardiaAlarmSystem;
+ @Autowired
+ private RestClient restClientSunrise;
+ @Autowired
+ private ObjectMapper objectMapper;
+
+ @Value("${sunrise.channel}")
+ private String sunriseChannel;
+ @Value("${sunrise.confirmation-path}")
+ private String sunriseConfirmationPath;
+
+ @Transactional
+ public void confirmOrder(String orderId) throws JsonProcessingException {
+ SunriseOrder order = sunriseOrderRepository.findByOrderId(orderId)
+ .orElseThrow(() -> new NotFoundException("Order not found"));
+ SunriseSite site = sunriseSiteRepository.findBySiteId(order.getSiteId())
+ .orElseThrow(() -> new NotFoundException("Site not found"));
+ confirmOrder(order, site);
+ }
+
+ @Transactional
+ public void confirmOrder(SunriseOrder order, SunriseSite site) throws JsonProcessingException {
+ log.info("Confirming order {} for site {}", order.getOrderId(), site.getSiteId());
+
+ // TODO: Generate device details, for now they are copied from the site
+ SunriseOrderConfirmation confirmation = SunriseOrderConfirmation.fromOrderAndSite(order,
+ site);
+
+ // Build an OAuth2 request for the sunrise-threescale provider
+ OAuth2AuthorizeRequest authorizeRequest = OAuth2AuthorizeRequest.withClientRegistrationId(
+ "sunrise-threescale")
+ .principal("sunrise-order-controller")
+ .build();
+ OAuth2AuthorizedClient authorizedClient = authorizedClientServiceAndManager.authorize(
+ authorizeRequest);
+ // Get the token from the authorized client object
+ OAuth2AccessToken accessToken = Objects.requireNonNull(authorizedClient,
+ "Did not get authorized client").getAccessToken();
+
+ log.info("Confirming order {} for site {} with Sunrise", order.getOrderId(),
+ site.getSiteId());
+
+ // Call the Sunrise API to confirm the order
+ ResponseEntity<String> response = restClientSunrise.post()
+ .uri(sunriseConfirmationPath)
+ .header("Authorization", "Bearer " + accessToken.getTokenValue())
+ .header("x-correlation-id", order.getTransactionId())
+ .header("x-sunrise-channel", sunriseChannel)
+ .header("x-sunrise-external-reference-id", order.getOrderId())
+ .contentType(MediaType.APPLICATION_JSON)
+ .body(confirmation)
+ .retrieve()
+ .toEntity(String.class);
+
+ if (response.getStatusCode().isError()) {
+ throw new RuntimeException("Error occurred while confirming the order");
+ }
+
+ // Set the completed date to now
+ order.setCompletedDate(OffsetDateTime.now());
+ sunriseOrderRepository.save(order);
+
+ // Set the provisioning state to PROVISIONED
+ site.setProvisioningState(SunriseSite.ProvisioningState.PROVISIONED);
+ sunriseSiteRepository.save(site);
+
+ log.info("Order {} confirmed for site {}", order.getOrderId(), site.getSiteId());
+ }
+
+ @JmsListener(
+ destination = "discovery",
+ containerFactory = "boxtalkJmsListenerContainerFactory"
+ )
+ public void processMessage(final String message) {
+ log.info("New Message Received -- \n{}", message);
+ Pattern pattern = Pattern.compile("commonname=\"([^\"]*?)\"");
+ Matcher matcher = pattern.matcher(message);
+ if (matcher.find()) {
+ final String cn = matcher.group(1);
+ log.info("Have discovery for cn: {}", cn);
+ final String macTail = cn.substring(cn.length() - 6).toUpperCase();
+ final String gatewaySerialNumber = "02FA02" + macTail;
+ log.info("Have discovery for mac: {}", gatewaySerialNumber);
+ // Find site for this gateway
+ //TODO: Only with PENDING provStatus?
+
+ SunriseSite site = sunriseSiteRepository.findByDeviceDetails_DeviceSerialNumber(
+ gatewaySerialNumber)
+ .orElseThrow(() -> new NotFoundException("Site not found for gateway " + cn));
+ SunriseOrder order = sunriseOrderRepository.findByOrderId(site.getLatestOrder())
+ .orElseThrow(() -> new NotFoundException(
+ "Order not found for site " + site.getSiteId()));
+
+ if (order.getCompletedDate() != null) {
+ log.error("Order already completed for site: {}", site.getSiteId());
+ return;
+ }
+
+ log.info("Found site for site ID: {} with homeId {}", site.getSiteId(),
+ site.getEgardiaAccountDetails().getHomeId());
+ log.info("Found open order for order ID: {}", order.getOrderId());
+
+ site.setProvisioningState(SunriseSite.ProvisioningState.PROVISIONING);
+ sunriseSiteRepository.save(site);
+
+ // Start the gateway provisioning process
+ ResponseEntity<JsonNode> response = restClientEgardiaAlarmSystem.post()
+ .uri("/rest/v1/gateway/{homeId}/register/{mac}",
+ site.getEgardiaAccountDetails().getHomeId(), macTail)
+ .retrieve()
+ .onStatus(status -> status.value() >= 400, (errorReq, errorRes) -> {
+ log.error(
+ "Error occurred while provisioning gateway for site {}, provStatus: {}: {}",
+ site.getSiteId(), errorRes.getStatusCode(), errorRes.getBody());
+ throw new RestClientException(
+ "Error occurred while provisioning gateway for site "
+ + site.getSiteId());
+ })
+ .toEntity(JsonNode.class);
+ log.info("Gateway provisioned: {}", response);
+ try {
+ confirmOrder(order, site);
+ } catch (Exception e) {
+ log.error("Error occurred while confirming order for site {} with Sunrise",
+ site.getSiteId(), e);
+ }
greptile
logic: Error handling here could leave the system in an inconsistent state, with the site marked as PROVISIONED even if confirmOrder fails
suggested fix
        confirmOrder(order, site);
    } catch (Exception e) {
        log.error("Error occurred while confirming order for site {} with Sunrise",
            site.getSiteId(), e);
+       site.setProvisioningState(SunriseSite.ProvisioningState.FAILED);
        sunriseSiteRepository.save(site);
+       throw e;
    }
diff block
throw error;
}
}
+
+export async function deleteImageFromProject(
+ projectRoot: string,
+ imageName: string,
+): Promise<string> {
+ try {
+ const imageFolder = path.join(projectRoot, DefaultSettings.IMAGE_FOLDER);
+ const imagePath = path.join(imageFolder, imageName);
+ await fs.unlink(imagePath);
+ return imagePath;
+ } catch (error) {
+ console.error('Error deleting image:', error);
+ throw error;
+ }
+}
+
+export async function renameImageInProject(
+ projectRoot: string,
+ imageName: string,
+ newName: string,
+): Promise<string> {
+ if (!imageName || !newName) {
+ throw new Error('Image name and new name are required');
+ }
+
+ const imageFolder = path.join(projectRoot, DefaultSettings.IMAGE_FOLDER);
+ const oldImagePath = path.join(imageFolder, imageName);
+ const newImagePath = path.join(imageFolder, newName);
+
+ try {
+ await validateRename(oldImagePath, newImagePath);
+ await fs.rename(oldImagePath, newImagePath);
+
+ await updateImageReferences(projectRoot, imageName, newName);
+ return newImagePath;
+ } catch (error) {
+ console.error('Error renaming image:', error);
+ throw error;
+ }
+}
+
+const MAX_FILENAME_LENGTH = 255;
+const VALID_FILENAME_REGEX = /^[a-zA-Z0-9-_. ]+$/;
+
+async function validateRename(oldImagePath: string, newImagePath: string): Promise<void> {
+ try {
+ await fs.access(oldImagePath);
+ } catch (err) {
+ throw new Error(`Source image does not exist`);
+ }
+
+ const newFileName = path.basename(newImagePath);
+
+ if (newFileName.length > MAX_FILENAME_LENGTH) {
+ throw new Error(`File name is too long (max ${MAX_FILENAME_LENGTH} characters)`);
+ }
+
+ if (!VALID_FILENAME_REGEX.test(newFileName)) {
+ throw new Error(
+ 'File name can only contain letters, numbers, spaces, hyphens, underscores, and periods',
+ );
+ }
+
+ try {
+ await fs.access(newImagePath);
+ throw new Error(`A file with this name already exists`);
+ } catch (err: any) {
+ if (err.code !== 'ENOENT') {
+ throw err;
+ }
+ }
+}
+
+async function updateImageReferences(
+ projectRoot: string,
+ oldName: string,
+ newName: string,
+): Promise<void> {
+ const prefix = DefaultSettings.IMAGE_FOLDER.replace(/^public\//, '');
+ const oldImageUrl = `/${prefix}/${oldName}`;
+ const newImageUrl = `/${prefix}/${newName}`;
+ const pattern = new RegExp(oldImageUrl.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'g');
+
+ const sourceFiles = await findSourceFiles(projectRoot);
+ await Promise.all(
+ sourceFiles.map(async (file) => {
+ const content = await fs.readFile(file, 'utf8');
+ if (!content.includes(oldImageUrl)) {
+ return;
+ }
+
+ const updatedContent = content.replace(pattern, newImageUrl);
+ await fs.writeFile(file, updatedContent, 'utf8');
+ }),
greptile
style: No atomic write operation; if a write fails midway, it could leave the file in an inconsistent state. Consider writing to a temp file first and renaming it into place.
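For reference, a minimal sketch of the temp-file approach the comment suggests (the helper name is an assumption): stage the new content in a sibling temporary file and rename it over the original, so readers only ever see the old or the new version, never a partial write.

import { promises as fs } from 'fs';
import * as path from 'path';
import { randomBytes } from 'crypto';

// Hypothetical helper: writes atomically by staging to a temp file in the
// same directory (rename is only atomic within a single filesystem).
export async function writeFileAtomic(filePath: string, content: string): Promise<void> {
  const dir = path.dirname(filePath);
  const tmpPath = path.join(dir, `.${path.basename(filePath)}.${randomBytes(6).toString('hex')}.tmp`);
  try {
    await fs.writeFile(tmpPath, content, 'utf8');
    await fs.rename(tmpPath, filePath);
  } catch (error) {
    // Best-effort cleanup of the staged file; the original is untouched.
    await fs.unlink(tmpPath).catch(() => {});
    throw error;
  }
}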
diff block
+use std::path::PathBuf;
+use std::collections::HashMap;
+use serde::{Serialize, Deserialize};
+use serde_yaml::{Value, Mapping};
+use anyhow::{Result, Context};
+use std::fs;
+use colored::*;
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct YamlFile {
+ pub models: Vec<Model>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct Model {
+ pub name: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub description: Option<String>,
+ #[serde(default, skip_serializing_if = "Vec::is_empty")]
+ pub entities: Vec<Entity>,
+ #[serde(default, skip_serializing_if = "Vec::is_empty")]
+ pub dimensions: Vec<Dimension>,
+ #[serde(default, skip_serializing_if = "Vec::is_empty")]
+ pub measures: Vec<Measure>,
+ #[serde(flatten)]
+ pub extra: HashMap<String, Value>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct Entity {
+ pub name: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub ref_: Option<String>,
+ pub expr: String,
+ #[serde(rename = "type")]
+ pub entity_type: String,
+ pub description: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub project_path: Option<String>,
+ #[serde(flatten)]
+ pub extra: HashMap<String, Value>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct Dimension {
+ pub name: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub description: Option<String>,
+ #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
+ pub type_: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub expr: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub semantic_type: Option<String>,
+ #[serde(default, skip_serializing_if = "should_skip_searchable")]
+ pub searchable: Option<bool>,
+ #[serde(flatten)]
+ pub extra: HashMap<String, Value>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct Measure {
+ pub name: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub description: Option<String>,
+ pub expr: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub agg: Option<String>,
+ #[serde(flatten)]
+ pub extra: HashMap<String, Value>,
+}
+
+#[derive(Debug)]
+pub struct YamlDiffMerger {
+ existing_yaml: PathBuf,
+ new_content: String,
+ backup_path: PathBuf,
+}
+
+#[derive(Debug)]
+pub struct ModelDiff {
+ added_dimensions: Vec<Dimension>,
+ removed_dimensions: Vec<String>,
+ added_measures: Vec<Measure>,
+ removed_measures: Vec<String>,
+ preserved_dimensions: Vec<Dimension>,
+ preserved_measures: Vec<Measure>,
+}
+
+#[derive(Debug)]
+pub struct DiffStats {
+ total_dimensions: usize,
+ total_measures: usize,
+ added_dimensions: usize,
+ added_measures: usize,
+ removed_dimensions: usize,
+ removed_measures: usize,
+ preserved_dimensions: usize,
+ preserved_measures: usize,
+}
+
+#[derive(Debug)]
+pub struct DiffResult {
+ changes: ModelDiff,
+ statistics: DiffStats,
+}
+
+impl YamlDiffMerger {
+ pub fn new(existing_yaml: PathBuf, new_content: String) -> Self {
+ let backup_path = existing_yaml.with_extension("yml.bak");
+ Self {
+ existing_yaml,
+ new_content,
+ backup_path,
+ }
+ }
+
+ fn parse_yaml_preserving_style(content: &str) -> Result<Value> {
+ serde_yaml::from_str(content).context("Failed to parse YAML content")
+ }
+
+ fn update_model_preserving_style(&self, existing_model: &mut Value, new_model: &Model) -> Result<()> {
+ if let Value::Mapping(map) = existing_model {
+ // Update dimensions while preserving style
+ if let Some(existing_dims) = map.get_mut("dimensions") {
+ if let Value::Sequence(dims) = existing_dims {
+ // Create a map of existing dimensions by name (case insensitive)
+ let mut dim_map: HashMap<String, &Value> = HashMap::new();
+ for dim in dims.iter() {
+ if let Some(name) = dim.get("name").and_then(|n| n.as_str()) {
+ dim_map.insert(name.to_lowercase(), dim);
+ }
+ }
+
+ // Update dimensions while preserving order and style
+ let mut new_dims = Vec::new();
+ for dim in &new_model.dimensions {
+ if let Some(&existing_dim) = dim_map.get(&dim.name.to_lowercase()) {
+ // Preserve existing dimension's style and casing
+ new_dims.push(existing_dim.clone());
+ } else {
+ // Add new dimension
+ new_dims.push(serde_yaml::to_value(dim)?);
+ }
+ }
+ *dims = new_dims;
+ }
+ }
+
+ // Update measures while preserving style
+ if let Some(existing_measures) = map.get_mut("measures") {
+ if let Value::Sequence(measures) = existing_measures {
+ // Create a map of existing measures by name (case insensitive)
+ let mut measure_map: HashMap<String, &Value> = HashMap::new();
+ for measure in measures.iter() {
+ if let Some(name) = measure.get("name").and_then(|n| n.as_str()) {
+ measure_map.insert(name.to_lowercase(), measure);
+ }
+ }
+
+ // Update measures while preserving order and style
+ let mut new_measures = Vec::new();
+ for measure in &new_model.measures {
+ if let Some(&existing_measure) = measure_map.get(&measure.name.to_lowercase()) {
+ // Preserve existing measure's style and casing
+ new_measures.push(existing_measure.clone());
+ } else {
+ // Add new measure
+ new_measures.push(serde_yaml::to_value(measure)?);
+ }
+ }
+ *measures = new_measures;
+ }
+ }
+ }
+ Ok(())
+ }
+
+ pub fn compute_diff(&self) -> Result<DiffResult> {
+ // Read and parse existing YAML
+ let existing_content = fs::read_to_string(&self.existing_yaml)
+ .context(format!("Failed to read file: {}", self.existing_yaml.display()))?;
+
+ let existing_yaml: YamlFile = match serde_yaml::from_str(&existing_content) {
+ Ok(yaml) => yaml,
+ Err(e) => {
+ // Try to parse as raw YAML first to see if it's valid YAML at all
+ match serde_yaml::from_str::<serde_yaml::Value>(&existing_content) {
+ Ok(_) => return Err(anyhow::anyhow!(
+ "File {} contains valid YAML but doesn't match expected structure. Error: {}",
+ self.existing_yaml.display(), e
+ )),
+ Err(_) => return Err(anyhow::anyhow!(
+ "File {} contains invalid YAML. Content:\n{}\nError: {}",
+ self.existing_yaml.display(), existing_content, e
+ )),
+ }
+ }
+ };
+
+ // Parse new YAML content
+ let new_yaml: YamlFile = match serde_yaml::from_str(&self.new_content) {
+ Ok(yaml) => yaml,
+ Err(e) => {
+ // Try to parse as raw YAML first to see if it's valid YAML at all
+ match serde_yaml::from_str::<serde_yaml::Value>(&self.new_content) {
+ Ok(_) => return Err(anyhow::anyhow!(
+ "New content contains valid YAML but doesn't match expected structure. Error: {}",
+ e
+ )),
+ Err(_) => return Err(anyhow::anyhow!(
+ "New content contains invalid YAML. Content:\n{}\nError: {}",
+ self.new_content, e
+ )),
+ }
+ }
+ };
+
+ // Validate models array is not empty
+ if existing_yaml.models.is_empty() {
+ return Err(anyhow::anyhow!(
+ "File {} contains no models",
+ self.existing_yaml.display()
+ ));
+ }
+ if new_yaml.models.is_empty() {
+ return Err(anyhow::anyhow!("New content contains no models"));
+ }
+
+ // Since we're dealing with a single model in the models array
+ let existing_model = &existing_yaml.models[0];
+ let new_model = &new_yaml.models[0];
+
+ // Create maps for quick lookups
+ let existing_dims: HashMap<_, _> = existing_model.dimensions.iter()
+ .map(|d| (d.name.to_lowercase(), d)).collect();
+ let existing_measures: HashMap<_, _> = existing_model.measures.iter()
+ .map(|m| (m.name.to_lowercase(), m)).collect();
+ let new_dims: HashMap<_, _> = new_model.dimensions.iter()
+ .map(|d| (d.name.to_lowercase(), d)).collect();
+ let new_measures: HashMap<_, _> = new_model.measures.iter()
+ .map(|m| (m.name.to_lowercase(), m)).collect();
+
+ let mut changes = ModelDiff {
+ added_dimensions: Vec::new(),
+ removed_dimensions: Vec::new(),
+ added_measures: Vec::new(),
+ removed_measures: Vec::new(),
+ preserved_dimensions: Vec::new(),
+ preserved_measures: Vec::new(),
+ };
+
+ // Process dimensions
+ for (name, dim) in &new_dims {
+ if existing_dims.contains_key(name) {
+ changes.preserved_dimensions.push(existing_dims[name].clone());
+ } else {
+ changes.added_dimensions.push((*dim).clone());
+ }
+ }
+ for (name, dim) in existing_dims.iter() {
+ if !new_dims.contains_key(name) {
+ changes.removed_dimensions.push(dim.name.clone());
+ }
+ }
+
+ // Process measures
+ for (name, measure) in &new_measures {
+ if existing_measures.contains_key(name) {
+ changes.preserved_measures.push(existing_measures[name].clone());
+ } else {
+ changes.added_measures.push((*measure).clone());
+ }
+ }
+ for (name, measure) in existing_measures.iter() {
+ if !new_measures.contains_key(name) {
+ changes.removed_measures.push(measure.name.clone());
+ }
+ }
+
+ let statistics = DiffStats {
+ total_dimensions: existing_dims.len(),
+ total_measures: existing_measures.len(),
+ added_dimensions: changes.added_dimensions.len(),
+ added_measures: changes.added_measures.len(),
+ removed_dimensions: changes.removed_dimensions.len(),
+ removed_measures: changes.removed_measures.len(),
+ preserved_dimensions: changes.preserved_dimensions.len(),
+ preserved_measures: changes.preserved_measures.len(),
+ };
+
+ Ok(DiffResult { changes, statistics })
+ }
+
+ pub fn preview_changes(&self, diff_result: &DiffResult) {
+ println!("\nChanges to be applied:");
+ println!("----------------------");
+
+ if !diff_result.changes.added_dimensions.is_empty() {
+ println!("\nNew dimensions to be added:");
+ for dim in &diff_result.changes.added_dimensions {
+ println!(" + {}", dim.name.green());
+ }
+ }
+
+ if !diff_result.changes.added_measures.is_empty() {
+ println!("\nNew measures to be added:");
+ for measure in &diff_result.changes.added_measures {
+ println!(" + {}", measure.name.green());
+ }
+ }
+
+ if !diff_result.changes.removed_dimensions.is_empty() {
+ println!("\nDimensions to be removed:");
+ for name in &diff_result.changes.removed_dimensions {
+ println!(" - {}", name.red());
+ }
+ }
+
+ if !diff_result.changes.removed_measures.is_empty() {
+ println!("\nMeasures to be removed:");
+ for name in &diff_result.changes.removed_measures {
+ println!(" - {}", name.red());
+ }
+ }
+
+ if !diff_result.changes.preserved_dimensions.is_empty() {
+ println!("\nPreserved dimensions (keeping existing configuration):");
+ for dim in &diff_result.changes.preserved_dimensions {
+ println!(" • {}", dim.name.yellow());
+ }
+ }
+
+ if !diff_result.changes.preserved_measures.is_empty() {
+ println!("\nPreserved measures (keeping existing configuration):");
+ for measure in &diff_result.changes.preserved_measures {
+ println!(" • {}", measure.name.yellow());
+ }
+ }
+
+ println!("\nStatistics:");
+ println!(" Dimensions:");
+ println!(" Total: {}", diff_result.statistics.total_dimensions);
+ println!(" Added: {}", diff_result.statistics.added_dimensions);
+ println!(" Removed: {}", diff_result.statistics.removed_dimensions);
+ println!(" Preserved: {}", diff_result.statistics.preserved_dimensions);
+ println!(" Measures:");
+ println!(" Total: {}", diff_result.statistics.total_measures);
+ println!(" Added: {}", diff_result.statistics.added_measures);
+ println!(" Removed: {}", diff_result.statistics.removed_measures);
+ println!(" Preserved: {}", diff_result.statistics.preserved_measures);
+ }
+
+ pub fn apply_changes(&self, diff_result: &DiffResult) -> Result<()> {
+ // Create backup
+ fs::copy(&self.existing_yaml, &self.backup_path)
+ .context("Failed to create backup file")?;
+
+ // Read existing YAML preserving style
+ let existing_content = fs::read_to_string(&self.existing_yaml)
+ .context("Failed to read existing YAML file")?;
+ let mut existing_yaml = Self::parse_yaml_preserving_style(&existing_content)?;
+
+ // Parse new content
+ let new_yaml: YamlFile = serde_yaml::from_str(&self.new_content)
+ .context("Failed to parse new YAML content")?;
+
+ // Update the existing YAML while preserving style
+ if let Value::Mapping(map) = &mut existing_yaml {
+ if let Some(Value::Sequence(models)) = map.get_mut("models") {
+ if !models.is_empty() {
+ // Update the first model
+ self.update_model_preserving_style(&mut models[0], &new_yaml.models[0])?;
+ }
+ }
+ }
+
+ // Write to temporary file using the original style
+ let temp_path = self.existing_yaml.with_extension("yml.tmp");
+ let yaml_str = serde_yaml::to_string(&existing_yaml)?;
+ fs::write(&temp_path, yaml_str)
+ .context("Failed to write temporary file")?;
+
+ // Atomic rename
+ fs::rename(&temp_path, &self.existing_yaml)
+ .context("Failed to apply changes")?;
+
+ // Remove backup if successful
+ fs::remove_file(&self.backup_path)
+ .context("Failed to remove backup file")?;
+
+ Ok(())
+ }
greptile
logic: The backup file is not restored if an error occurs during the update process between lines 378-386, which could leave the file in an inconsistent state.
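One hedged way to address this, sketched in TypeScript rather than the Rust above and with a hypothetical `applyUpdate` helper standing in for the actual YAML rewrite, is to wrap the update in a try/catch that copies the backup back over the original before re-throwing:
```typescript
import * as fs from "node:fs";

// Sketch of the restore-on-failure pattern the reviewer describes.
// `applyUpdate` is a placeholder, not part of the code under review.
export function applyWithBackup(
  targetPath: string,
  backupPath: string,
  applyUpdate: (path: string) => void,
): void {
  fs.copyFileSync(targetPath, backupPath); // create the backup first
  try {
    applyUpdate(targetPath);   // attempt the in-place update
    fs.unlinkSync(backupPath); // success: backup no longer needed
  } catch (err) {
    // failure: restore the original content before propagating the error
    fs.copyFileSync(backupPath, targetPath);
    fs.unlinkSync(backupPath);
    throw err;
  }
}
```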
diff block
},
);
- const syncCursor = messageChannel.syncCursor;
-
- const { messageExternalIds, messageExternalIdsToDelete, nextSyncCursor } =
- await this.messagingGetMessageListService.getPartialMessageList(
+ const partialMessageLists =
+ await this.messagingGetMessageListService.getPartialMessageLists(
connectedAccount,
- syncCursor,
+ messageChannel,
);
greptile
logic: getPartialMessageLists lacks proper error handling; if one folder fails, the entire sync could be left in an inconsistent state
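A minimal sketch of isolating per-folder failures (TypeScript, with hypothetical `Folder` and `PartialMessageList` shapes since the real types are not shown in the diff):
```typescript
// Hypothetical types; the real service and folder shapes are not in the diff.
type Folder = { id: string };
type PartialMessageList = { folderId: string; messageExternalIds: string[] };

async function getPartialMessageListsSafely(
  folders: Folder[],
  fetchForFolder: (folder: Folder) => Promise<PartialMessageList>,
): Promise<{ results: PartialMessageList[]; failures: { folderId: string; error: unknown }[] }> {
  const results: PartialMessageList[] = [];
  const failures: { folderId: string; error: unknown }[] = [];

  for (const folder of folders) {
    try {
      results.push(await fetchForFolder(folder));
    } catch (error) {
      // Record the failure instead of aborting mid-sync; the caller can decide
      // whether to retry these folders or fail the whole run atomically.
      failures.push({ folderId: folder.id, error });
    }
  }

  return { results, failures };
}
```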
diff block
throw error;
}
}
+
+export async function deleteNextJsPage(projectRoot: string, pagePath: string, isDir: boolean) {
+ try {
+ const routerConfig = await detectRouterType(projectRoot);
+
+ if (!routerConfig) {
+ throw new Error('Could not detect Next.js router type');
+ }
+
+ if (routerConfig.type !== 'app') {
+ throw new Error('Page deletion is only supported for App Router projects for now.');
+ }
+
+ const fullPath = path.join(routerConfig.basePath, pagePath);
+
+ // Check if file/folder exists
+ let stats;
+ try {
+ stats = await fs.stat(fullPath);
+ } catch (err: any) {
+ if (err.code === 'ENOENT') {
+ throw new Error('Selected page not found');
+ }
+ throw err;
+ }
+
+ if (isDir) {
+ await fs.rm(fullPath, { recursive: true, force: true });
+ } else {
+ const selectedFilePath = path.join(fullPath, 'page.tsx');
+ await fs.unlink(selectedFilePath);
+ await cleanupEmptyFolders(path.dirname(fullPath));
+ }
+
+ console.log(`Deleted: ${fullPath}`);
+ return true;
+ } catch (error) {
+ console.error('Error deleting page:', error);
+ throw error;
+ }
+}
+
+async function cleanupEmptyFolders(folderPath: string) {
+ while (folderPath !== path.dirname(folderPath)) {
+ try {
+ const files = await fs.readdir(folderPath);
+ if (files.length === 0) {
+ await fs.rm(folderPath, { recursive: true, force: true });
+ folderPath = path.dirname(folderPath);
+ } else {
+ break;
+ }
+ } catch (err: any) {
+ if (err.code === 'ENOENT') {
+ throw err;
+ }
greptile
style: Throwing the ENOENT error during cleanup could leave the directory structure in an inconsistent state. Consider logging and continuing instead.
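A rough sketch of the reviewer's suggestion, reusing the same fs/path modules as the file above: treat a vanished folder as already cleaned up and stop, rather than failing the whole delete.
```typescript
import { promises as fs } from "node:fs";
import path from "node:path";

// Sketch only: a missing folder is treated as already cleaned instead of thrown.
async function cleanupEmptyFoldersSafely(folderPath: string): Promise<void> {
  while (folderPath !== path.dirname(folderPath)) {
    try {
      const files = await fs.readdir(folderPath);
      if (files.length > 0) {
        break; // folder is not empty, stop climbing
      }
      await fs.rm(folderPath, { recursive: true, force: true });
      folderPath = path.dirname(folderPath);
    } catch (err: any) {
      if (err.code === "ENOENT") {
        // Folder already gone; log and stop rather than failing the deletion.
        console.warn(`Skipping missing folder during cleanup: ${folderPath}`);
        break;
      }
      throw err;
    }
  }
}
```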
diff block
+import { cn } from 'lib/utils/css-classes'
+import React, { useCallback, useEffect, useRef, useState } from 'react'
+
+type ResizableDivProps = {
+ defaultWidth: number
+ minWidth?: number
+ maxWidth?: number
+ onResize: (width: number) => void
+ children?: React.ReactNode
+ className?: string
+ style?: React.CSSProperties
+}
+
+export function ResizableDiv({
+ defaultWidth,
+ minWidth = 100,
+ maxWidth = 1000,
+ onResize,
+ children,
+ className,
+ style,
+ ...props
+}: ResizableDivProps): JSX.Element {
+ const [width, setWidth] = useState(defaultWidth)
+ const containerRef = useRef<HTMLDivElement>(null)
+ const startXRef = useRef<number>(0)
+ const startWidthRef = useRef<number>(0)
+ const isResizing = useRef(false)
+ const rafRef = useRef<number | null>(null)
+ const currentWidthRef = useRef(defaultWidth)
+
+ // Update the current width ref when state changes
+ useEffect(() => {
+ currentWidthRef.current = width
+ }, [width])
+
+ // Function to apply width directly to DOM for smoother resizing
+ const applyWidth = useCallback((newWidth: number) => {
+ if (containerRef.current) {
+ containerRef.current.style.width = `${newWidth}px`
+ }
+ currentWidthRef.current = newWidth
+ }, [])
+
+ const handleMouseDown = useCallback((e: React.MouseEvent | React.TouchEvent) => {
+ document.body.classList.add('is-resizing')
+ const clientX = 'touches' in e ? e.touches[0].clientX : e.clientX
+ startXRef.current = clientX
+ startWidthRef.current = currentWidthRef.current
+ isResizing.current = true
+ e.preventDefault()
+ }, [])
greptile
logic: handleMouseDown is used for both mouse and touch events but doesn't handle touch cancel events, which could leave the component in an inconsistent state if touch interaction is interrupted
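One way to cover the interrupted-touch case, assuming the component registers its mousemove/mouseup listeners elsewhere (not shown in the diff), is a small hook that listens for `touchcancel` and resets the resizing flag:
```typescript
import { useEffect, type MutableRefObject } from "react";

// Sketch: ends an in-progress resize when the touch interaction is cancelled
// (incoming call, gesture takeover, etc.). `isResizing` mirrors the ref used
// in the component above.
export function useTouchCancelReset(isResizing: MutableRefObject<boolean>): void {
  useEffect(() => {
    const handleTouchCancel = () => {
      if (isResizing.current) {
        isResizing.current = false;
        document.body.classList.remove("is-resizing");
      }
    };
    window.addEventListener("touchcancel", handleTouchCancel);
    return () => window.removeEventListener("touchcancel", handleTouchCancel);
  }, [isResizing]);
}
```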
diff block
return resp;
},
(error: AxiosError) => {
+ const errorCode = error.response?.status;
+ //402 is the payment required error code
+ if (errorCode === 402) {
+ window.location.href = createBusterRoute({
+ route: BusterRoutes.INFO_GETTING_STARTED
+ });
+ }
greptile
logic: Redirecting before rejecting the error could leave pending operations in an inconsistent state. Consider returning after the redirect.
suggested fix
if (errorCode === 402) {
window.location.href = createBusterRoute({
route: BusterRoutes.INFO_GETTING_STARTED
});
+ return Promise.reject(error);
}
diff block
+use std::{future::Future, net::Ipv4Addr, sync::Arc, time::Duration};
+
+use rand::Rng;
+use reqwest::Client;
+use serde::Deserialize;
+use tokio::{
+ sync::{Mutex, RwLock},
+ task::JoinHandle,
+};
+use url::Url;
+
+pub struct ServiceDiscovery {
+ fetch_endpoint: Url,
+ last: RwLock<Vec<ApiServer>>,
+ handle: Mutex<Option<JoinHandle<()>>>,
+}
+
+impl ServiceDiscovery {
+ pub fn new(fetch_endpoint: Url) -> Arc<Self> {
+ Arc::new(ServiceDiscovery {
+ fetch_endpoint,
+ last: RwLock::new(Vec::new()),
+ handle: Mutex::new(None),
+ })
+ }
+
+ /// Starts a background tokio task that periodically fetches the endpoint and calls `cb`.
+ pub fn start<F, Fut, E>(self: &Arc<Self>, cb: F)
+ where
+ F: Fn(Vec<ApiServer>) -> Fut + Send + Sync + 'static,
+ Fut: Future<Output = Result<(), E>> + Send + 'static,
+ E: std::fmt::Debug,
+ {
+ let mut guard = self.handle.try_lock().expect("already started");
+ assert!(guard.is_none(), "already started");
+
+ let self2 = self.clone();
+ *guard = Some(tokio::task::spawn(async move {
+ let client = Client::new();
+
+ loop {
+ let res = match self2.fetch_inner(&client).await {
+ Ok(res) => res,
+ Err(err) => {
+ tracing::error!(?err, "fetch service discovery failed");
+ continue;
+ }
+ };
+
+ if let Err(err) = cb(res.servers.clone()).await {
+ tracing::error!(?err, "service discovery callback failed");
+ }
+
+ {
+ let mut guard = self2.last.write().await;
+ *guard = res.servers;
+ }
+
+ let duration = Duration::from_secs(60)
+ + rand::thread_rng().gen_range(Duration::ZERO..Duration::from_secs(1));
+ tokio::time::sleep(duration).await;
+ }
+ }));
+ }
+
+ /// Returns the last retrieved value without fetching.
+ pub async fn get(&self) -> Vec<ApiServer> {
+ self.last.read().await.clone()
+ }
+
+ /// Manually fetches the endpoint.
+ pub async fn fetch(&self) -> Result<Vec<ApiServer>, reqwest::Error> {
+ let client = Client::new();
+ Ok(self.fetch_inner(&client).await?.servers)
+ }
+
+ async fn fetch_inner(&self, client: &Client) -> Result<ApiResponse, reqwest::Error> {
+ Ok(client
+ .get(self.fetch_endpoint.clone())
+ .send()
+ .await?
+ .error_for_status()?
+ .json::<ApiResponse>()
+ .await?)
+ }
+}
+
+impl Drop for ServiceDiscovery {
+ // Stops the periodic handle if one exists.
+ fn drop(&mut self) {
+ if let Some(handle) = self.handle.try_lock().expect("should not be locked").take() {
+ handle.abort();
+ }
greptile
style: Aborting the task without cleanup or notification could leave the system in an inconsistent state
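The underlying concern is generic: prefer a cooperative shutdown signal over a hard abort so in-flight work finishes before the task exits. A minimal TypeScript analogue of that pattern (not the Rust code above; names are illustrative):
```typescript
// Cooperative-shutdown sketch: the loop checks a stop flag between iterations
// instead of being killed mid-operation.
class PollingTask {
  private stopped = false;
  private loop?: Promise<void>;

  start(fetchAndApply: () => Promise<void>, intervalMs = 60_000): void {
    this.loop = (async () => {
      while (!this.stopped) {
        try {
          await fetchAndApply(); // completes fully before a stop can take effect
        } catch (err) {
          console.error("service discovery tick failed", err);
        }
        await new Promise((resolve) => setTimeout(resolve, intervalMs));
      }
    })();
  }

  async stop(): Promise<void> {
    this.stopped = true; // signal, don't abort
    await this.loop;     // wait (up to one interval) for the loop to wind down
  }
}
```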
diff block
+import { InjectRepository } from '@nestjs/typeorm';
+
+import { ServerBlockNoteEditor } from '@blocknote/server-util';
+import chalk from 'chalk';
+import { Command } from 'nest-commander';
+import { FieldMetadataType } from 'twenty-shared';
+import { Repository } from 'typeorm';
+
+import {
+ ActiveWorkspacesCommandOptions,
+ ActiveWorkspacesCommandRunner,
+} from 'src/database/commands/active-workspaces.command';
+import { isCommandLogger } from 'src/database/commands/logger';
+import { FeatureFlagKey } from 'src/engine/core-modules/feature-flag/enums/feature-flag-key.enum';
+import { FeatureFlag } from 'src/engine/core-modules/feature-flag/feature-flag.entity';
+import { Workspace } from 'src/engine/core-modules/workspace/workspace.entity';
+import { FieldMetadataEntity } from 'src/engine/metadata-modules/field-metadata/field-metadata.entity';
+import { ObjectMetadataEntity } from 'src/engine/metadata-modules/object-metadata/object-metadata.entity';
+import { WorkspaceMetadataVersionService } from 'src/engine/metadata-modules/workspace-metadata-version/services/workspace-metadata-version.service';
+import { generateMigrationName } from 'src/engine/metadata-modules/workspace-migration/utils/generate-migration-name.util';
+import {
+ WorkspaceMigrationColumnActionType,
+ WorkspaceMigrationColumnCreate,
+ WorkspaceMigrationTableAction,
+ WorkspaceMigrationTableActionType,
+} from 'src/engine/metadata-modules/workspace-migration/workspace-migration.entity';
+import { WorkspaceMigrationService } from 'src/engine/metadata-modules/workspace-migration/workspace-migration.service';
+import { TwentyORMGlobalManager } from 'src/engine/twenty-orm/twenty-orm-global.manager';
+import { computeObjectTargetTable } from 'src/engine/utils/compute-object-target-table.util';
+import { computeTableName } from 'src/engine/utils/compute-table-name.util';
+import { WorkspaceDataSourceService } from 'src/engine/workspace-datasource/workspace-datasource.service';
+import { WorkspaceMigrationRunnerService } from 'src/engine/workspace-manager/workspace-migration-runner/workspace-migration-runner.service';
+
+@Command({
+ name: 'upgrade-0.42:migrate-rich-text-field',
+ description: 'Migrate RICH_TEXT fields to new composite structure',
+})
+export class MigrateRichTextFieldCommand extends ActiveWorkspacesCommandRunner {
+ constructor(
+ @InjectRepository(Workspace, 'core')
+ protected readonly workspaceRepository: Repository<Workspace>,
+ @InjectRepository(FieldMetadataEntity, 'metadata')
+ private readonly fieldMetadataRepository: Repository<FieldMetadataEntity>,
+ @InjectRepository(ObjectMetadataEntity, 'metadata')
+ private readonly objectMetadataRepository: Repository<ObjectMetadataEntity>,
+ @InjectRepository(FeatureFlag, 'core')
+ protected readonly featureFlagRepository: Repository<FeatureFlag>,
+ private readonly workspaceDataSourceService: WorkspaceDataSourceService,
+ private readonly twentyORMGlobalManager: TwentyORMGlobalManager,
+ private readonly workspaceMigrationService: WorkspaceMigrationService,
+ private readonly workspaceMigrationRunnerService: WorkspaceMigrationRunnerService,
+ private readonly workspaceMetadataVersionService: WorkspaceMetadataVersionService,
+ ) {
+ super(workspaceRepository);
+ }
+
+ async executeActiveWorkspacesCommand(
+ _passedParam: string[],
+ options: ActiveWorkspacesCommandOptions,
+ workspaceIds: string[],
+ ): Promise<void> {
+ this.logger.log(
+ 'Running command to migrate RICH_TEXT fields to new composite structure',
+ );
+
+ if (isCommandLogger(this.logger)) {
+ this.logger.setVerbose(options.verbose ?? false);
+ }
+
+ let workspaceIterator = 1;
+
+ for (const workspaceId of workspaceIds) {
+ this.logger.log(
+ `Running command for workspace ${workspaceId} ${workspaceIterator}/${workspaceIds.length}`,
+ );
+
+ const richTextFields = await this.fieldMetadataRepository.find({
+ where: {
+ workspaceId,
+ type: FieldMetadataType.RICH_TEXT,
+ },
+ });
+
+ if (!richTextFields.length) {
+ this.logger.log('No RICH_TEXT fields found in this workspace');
+ workspaceIterator++;
+ continue;
+ }
+
+ this.logger.log(`Found ${richTextFields.length} RICH_TEXT fields`);
+
+ for (const richTextField of richTextFields) {
+ const newRichTextField: Partial<FieldMetadataEntity> = {
+ ...richTextField,
+ name: `${richTextField.name}V2`,
+ id: undefined,
+ type: FieldMetadataType.RICH_TEXT_V2,
+ defaultValue: null,
+ };
+
+ await this.fieldMetadataRepository.insert(newRichTextField);
+
+ const objectMetadata = await this.objectMetadataRepository.findOne({
+ where: { id: richTextField.objectMetadataId },
+ });
+
+ if (objectMetadata === null) {
+ this.logger.log(
+ `Object metadata not found for rich text field ${richTextField.name} in workspace ${workspaceId}`,
+ );
+ continue;
+ }
+
+ await this.workspaceMigrationService.createCustomMigration(
+ generateMigrationName(
+ `migrate-rich-text-field-${objectMetadata.nameSingular}-${richTextField.name}`,
+ ),
+ workspaceId,
+ [
+ {
+ name: computeObjectTargetTable(objectMetadata),
+ action: WorkspaceMigrationTableActionType.ALTER,
+ columns: [
+ {
+ action: WorkspaceMigrationColumnActionType.CREATE,
+ columnName: `${richTextField.name}V2Blocknote`,
+ columnType: 'text',
+ isNullable: true,
+ defaultValue: null,
+ } satisfies WorkspaceMigrationColumnCreate,
+ {
+ action: WorkspaceMigrationColumnActionType.CREATE,
+ columnName: `${richTextField.name}V2Markdown`,
+ columnType: 'text',
+ isNullable: true,
+ defaultValue: null,
+ } satisfies WorkspaceMigrationColumnCreate,
+ ],
+ } satisfies WorkspaceMigrationTableAction,
+ ],
+ );
+ }
greptile
style: No transaction wraps the field creation and the migration. If either fails, the database could be left in an inconsistent state.
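A hedged sketch of the transactional variant, assuming a TypeORM `DataSource` is available; the command above injects repositories directly, so the exact wiring would differ, and the two callbacks stand in for the real insert and migration calls:
```typescript
import { DataSource, EntityManager } from "typeorm";

// Sketch: run the metadata insert and the migration bookkeeping in one
// transaction so a failure rolls both back.
async function migrateFieldTransactionally(
  dataSource: DataSource,
  insertField: (manager: EntityManager) => Promise<void>,
  createMigration: (manager: EntityManager) => Promise<void>,
): Promise<void> {
  await dataSource.transaction(async (manager) => {
    await insertField(manager);
    await createMigration(manager);
    // If either call throws, TypeORM rolls back the whole transaction.
  });
}
```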
diff block
+import { FileService } from 'src/engine/core-modules/file/services/file.service';
+import { Process } from 'src/engine/core-modules/message-queue/decorators/process.decorator';
+import { Processor } from 'src/engine/core-modules/message-queue/decorators/processor.decorator';
+import { MessageQueue } from 'src/engine/core-modules/message-queue/message-queue.constants';
+
+export type FileWorkspaceFolderDeletionJobData = {
+ workspaceId: string;
+};
+
+@Processor(MessageQueue.workspaceQueue)
+export class FileWorkspaceFolderDeletionJob {
+ constructor(private readonly fileService: FileService) {}
+
+ @Process(FileWorkspaceFolderDeletionJob.name)
+ async handle(data: FileWorkspaceFolderDeletionJobData): Promise<void> {
+ const { workspaceId } = data;
+
+ await this.fileService.deleteWorkspaceFolder(workspaceId);
+ }
greptile
logic: No error handling for a deleteWorkspaceFolder failure. A failed deletion could leave the workspace in an inconsistent state.
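A minimal sketch of surfacing the failure so the queue can retry instead of marking the job done with the folder still present (the logger shape and rethrow policy are assumptions, not the project's actual conventions):
```typescript
// Sketch: log and rethrow so the message queue can retry or dead-letter the job.
async function handleFolderDeletion(
  workspaceId: string,
  deleteWorkspaceFolder: (id: string) => Promise<void>,
  logger: { error: (msg: string, meta?: unknown) => void },
): Promise<void> {
  try {
    await deleteWorkspaceFolder(workspaceId);
  } catch (error) {
    logger.error(`Failed to delete folder for workspace ${workspaceId}`, { error });
    throw error; // let the queue's retry mechanism handle it
  }
}
```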
diff block
+import gql from 'graphql-tag';
+import request from 'supertest';
+import { makeGraphqlAPIRequest } from 'test/integration/graphql/utils/make-graphql-api-request.util';
+import { updateFeatureFlagFactory } from 'test/integration/graphql/utils/update-feature-flag-factory.util';
+
+import { SEED_APPLE_WORKSPACE_ID } from 'src/database/typeorm-seeds/core/workspaces';
+import { ErrorCode } from 'src/engine/core-modules/graphql/utils/graphql-errors.util';
+import { PermissionsExceptionMessage } from 'src/engine/metadata-modules/permissions/permissions.exception';
+
+const client = request(`http://localhost:${APP_PORT}`);
+
+describe('WorkspaceResolver', () => {
+ let originalWorkspaceState;
+
+ beforeAll(async () => {
+ // Store original workspace state
+ const query = gql`
+ query getWorkspace {
+ currentWorkspace {
+ customDomain
+ displayName
+ isGoogleAuthEnabled
+ isMicrosoftAuthEnabled
+ isPasswordAuthEnabled
+ logo
+ isPublicInviteLinkEnabled
+ subdomain
+ isCustomDomainEnabled
+ }
+ }
+ `;
+
+ const response = await makeGraphqlAPIRequest({ query });
+
+ originalWorkspaceState = response.body.data.currentWorkspace;
+
+ const enablePermissionsQuery = updateFeatureFlagFactory(
+ SEED_APPLE_WORKSPACE_ID,
+ 'IsPermissionsEnabled',
+ true,
+ );
+
+ await makeGraphqlAPIRequest(enablePermissionsQuery);
+ });
+
+ afterAll(async () => {
+ const disablePermissionsQuery = updateFeatureFlagFactory(
+ SEED_APPLE_WORKSPACE_ID,
+ 'IsPermissionsEnabled',
+ false,
+ );
+
+ await makeGraphqlAPIRequest(disablePermissionsQuery);
+
+ // Restore workspace state
+ const restoreQuery = gql`
+ mutation updateWorkspace {
+ updateWorkspace(data: {
+ displayName: "${originalWorkspaceState.displayName}",
+ subdomain: "${originalWorkspaceState.subdomain}",
+ customDomain: "${originalWorkspaceState.customDomain}",
+ logo: "${originalWorkspaceState.logo}",
+ isGoogleAuthEnabled: ${originalWorkspaceState.isGoogleAuthEnabled},
+ isMicrosoftAuthEnabled: ${originalWorkspaceState.isMicrosoftAuthEnabled},
+ isPasswordAuthEnabled: ${originalWorkspaceState.isPasswordAuthEnabled}
+ }) {
+ id
+ }
+ }
+ `;
greptile
logic: The restore mutation doesn't include isPublicInviteLinkEnabled from the original state, which could leave the test environment in an inconsistent state
```suggestion
// Restore workspace state
const restoreQuery = gql`
mutation updateWorkspace {
updateWorkspace(data: {
displayName: "${originalWorkspaceState.displayName}",
subdomain: "${originalWorkspaceState.subdomain}",
customDomain: "${originalWorkspaceState.customDomain}",
logo: "${originalWorkspaceState.logo}",
isGoogleAuthEnabled: ${originalWorkspaceState.isGoogleAuthEnabled},
isMicrosoftAuthEnabled: ${originalWorkspaceState.isMicrosoftAuthEnabled},
+ isPasswordAuthEnabled: ${originalWorkspaceState.isPasswordAuthEnabled},
+ isPublicInviteLinkEnabled: ${originalWorkspaceState.isPublicInviteLinkEnabled}
}) {
id
}
}
`;
```
diff block
f"Some folders/drives were not retrieved. IDs: {remaining_folders}"
)
+ def _checkpointed_oauth_retrieval(
+ self,
+ is_slim: bool,
+ checkpoint: GoogleDriveCheckpoint,
+ start: SecondsSinceUnixEpoch | None = None,
+ end: SecondsSinceUnixEpoch | None = None,
+ ) -> Iterator[GoogleDriveFileType]:
+ drive_files = self._manage_oauth_retrieval(
+ is_slim=is_slim,
+ checkpoint=checkpoint,
+ start=start,
+ end=end,
+ )
+ if is_slim:
+ return drive_files
+
+ return self._checkpoint_yield(
+ drive_files=drive_files,
+ checkpoint=checkpoint,
+ )
+
+ def _manage_oauth_retrieval(
+ self,
+ is_slim: bool,
+ checkpoint: GoogleDriveCheckpoint,
+ start: SecondsSinceUnixEpoch | None = None,
+ end: SecondsSinceUnixEpoch | None = None,
+ ) -> Iterator[GoogleDriveFileType]:
+ if checkpoint.completion_stage == DriveRetrievalStage.START:
+ checkpoint.completion_stage = DriveRetrievalStage.OAUTH_FILES
+
+ drive_service = get_drive_service(self.creds, self.primary_admin_email)
+
+ if checkpoint.completion_stage == DriveRetrievalStage.OAUTH_FILES:
+ checkpoint.curr_completion_key = checkpoint.completion_stage
+ yield from self._oauth_retrieval_all_files(
+ drive_service=drive_service,
+ is_slim=is_slim,
+ checkpoint=checkpoint,
+ start=start,
+ end=end,
+ )
+ checkpoint.completion_stage = DriveRetrievalStage.DRIVE_IDS
+
+ all_requested = (
+ self.include_files_shared_with_me
+ and self.include_my_drives
+ and self.include_shared_drives
+ )
+ if all_requested:
+ # If all 3 are true, we already yielded from get_all_files_for_oauth
+ checkpoint.completion_stage = DriveRetrievalStage.DONE
+ return
+
+ drive_ids_to_retrieve, folder_ids_to_retrieve = self._determine_retrieval_ids(
+ checkpoint, is_slim, DriveRetrievalStage.SHARED_DRIVE_FILES
+ )
+
+ if checkpoint.completion_stage == DriveRetrievalStage.SHARED_DRIVE_FILES:
+ yield from self._oauth_retrieval_drives(
+ is_slim=is_slim,
+ drive_service=drive_service,
+ drive_ids_to_retrieve=drive_ids_to_retrieve,
+ checkpoint=checkpoint,
+ start=start,
+ end=end,
+ )
+
+ checkpoint.completion_stage = DriveRetrievalStage.FOLDER_FILES
+
+ if checkpoint.completion_stage == DriveRetrievalStage.FOLDER_FILES:
+ checkpoint.curr_completion_key = checkpoint.completion_stage
+ yield from self._oauth_retrieval_folders(
+ is_slim=is_slim,
+ drive_service=drive_service,
+ drive_ids_to_retrieve=drive_ids_to_retrieve,
+ folder_ids_to_retrieve=folder_ids_to_retrieve,
+ checkpoint=checkpoint,
+ start=start,
+ end=end,
+ )
+
+ checkpoint.completion_stage = DriveRetrievalStage.DONE
+
def _fetch_drive_items(
self,
is_slim: bool,
+ checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> Iterator[GoogleDriveFileType]:
+ assert checkpoint is not None, "Must provide checkpoint for full retrieval"
retrieval_method = (
self._manage_service_account_retrieval
if isinstance(self.creds, ServiceAccountCredentials)
- else self._manage_oauth_retrieval
+ else self._checkpointed_oauth_retrieval
)
- drive_files = retrieval_method(
+
+ return retrieval_method(
is_slim=is_slim,
+ checkpoint=checkpoint,
start=start,
end=end,
)
- return drive_files
-
def _extract_docs_from_google_drive(
self,
+ checkpoint: GoogleDriveCheckpoint,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
- ) -> GenerateDocumentsOutput:
- # Create a larger process pool for file conversion
- with ThreadPoolExecutor(max_workers=8) as executor:
- # Prepare a partial function with the credentials and admin email
- convert_func = partial(
- _convert_single_file,
- self.creds,
- self.primary_admin_email,
- image_analysis_llm=self.image_analysis_llm, # Use the mixin's LLM
- )
-
- # Fetch files in batches
- files_batch: list[GoogleDriveFileType] = []
- for file in self._fetch_drive_items(is_slim=False, start=start, end=end):
- files_batch.append(file)
+ ) -> Iterator[list[Document | ConnectorFailure]]:
+ try:
+ # Create a larger process pool for file conversion
+ with ThreadPoolExecutor(max_workers=8) as executor:
+ # Prepare a partial function with the credentials and admin email
+ convert_func = partial(
+ _convert_single_file,
+ self.creds,
+ self.primary_admin_email,
+ image_analysis_llm=self.image_analysis_llm, # Use the mixin's LLM
+ )
- if len(files_batch) >= self.batch_size:
- # Process the batch
+ # Fetch files in batches
+ batches_complete = 0
+ files_batch: list[GoogleDriveFileType] = []
+ for file in self._fetch_drive_items(
+ is_slim=False,
+ checkpoint=checkpoint,
+ start=start,
+ end=end,
+ ):
+ files_batch.append(file)
+
+ if len(files_batch) >= self.batch_size:
+ # Process the batch
+ futures = [
+ executor.submit(convert_func, file) for file in files_batch
+ ]
+ documents = []
+ for future in as_completed(futures):
+ try:
+ doc = future.result()
+ if doc is not None:
+ documents.append(doc)
+ except Exception as e:
+ logger.error(f"Error converting file: {e}")
+
+ if documents:
+ yield documents
+ batches_complete += 1
+ files_batch = []
+
+ if batches_complete > BATCHES_PER_CHECKPOINT:
+ checkpoint.retrieved_ids = list(self._retrieved_ids)
+ return # create a new checkpoint
greptile
logic: checkpoint.retrieved_ids is set but checkpoint.completion_stage is not updated before returning, which could cause an inconsistent state
suggested fix
if batches_complete > BATCHES_PER_CHECKPOINT:
checkpoint.retrieved_ids = list(self._retrieved_ids)
+ checkpoint.completion_stage = DriveRetrievalStage.SHARED_DRIVE_FILES # Update stage before returning
return # create a new checkpoint
diff block
return Response({"success": True})
+ @action(methods=["POST"], detail=False)
+ def bulk(self, request, **kwargs):
+ issue_ids = request.data.get("ids", [])
+ action = request.data.get("action")
+
+ if action == "resolve":
+ self.queryset.filter(id__in=issue_ids).update(status=ErrorTrackingIssue.Status.RESOLVED)
+ elif action == "assign":
+ assignments = ErrorTrackingIssueAssignment.objects.filter(issue_id__in=issue_ids)
+
+ # given bulk operation it's actually easier to delete all assignments and recreate them
+ assignments.delete()
greptile
logic: Deleting all assignments without a transaction could leave data in an inconsistent state if bulk_create fails
suggested fix
# given bulk operation it's actually easier to delete all assignments and recreate them
+ with transaction.atomic():
assignments.delete()
diff block
...(apiAssignment?.submission?.submitted_at
? { icon: Icons.Completed, tooltip: "Submitted" }
greptile
logic: Inconsistent state usage: this check reads apiAssignment.submission, but apiSubmission is used elsewhere
diff block
+import { SupabaseClient } from "@supabase/supabase-js";
+import { AttachParams } from "../products/AttachParams.js";
+import { BillingInterval } from "@autumn/shared";
+import Stripe from "stripe";
+import { getStripeSubItems } from "@/external/stripe/stripePriceUtils.js";
+import { createStripeCli } from "@/external/stripe/utils.js";
+import { getStripeExpandedInvoice } from "@/external/stripe/stripeInvoiceUtils.js";
+import {
+ attachToInsertParams,
+ initProductInStripe,
+} from "@/internal/products/productUtils.js";
+import { InvoiceService } from "../invoices/InvoiceService.js";
+import { createFullCusProduct } from "./createFullCusProduct.js";
+import { createStripeCusIfNotExists } from "@/external/stripe/stripeCusUtils.js";
+import { createStripeSubThroughInvoice } from "@/external/stripe/stripeInvoiceSubUtils.js";
+
+export const handleAddDefaultPaid = async ({
+ sb,
+ attachParams,
+ logger,
+}: {
+ sb: SupabaseClient;
+ attachParams: AttachParams;
+ logger: any;
+}) => {
+ const { org, customer, products, freeTrial } = attachParams;
+ const product = products[0];
+
+ // 1. Create stripe customer if not exists
+ await Promise.all([
+ createStripeCusIfNotExists({
+ sb,
+ org,
+ env: customer.env,
+ customer,
+ logger,
+ }),
+ initProductInStripe({
+ sb,
+ org,
+ env: customer.env,
+ product,
+ logger,
+ }),
+ ]);
+
+ const stripeCli = createStripeCli({ org, env: customer.env });
+
+ let itemSets = await getStripeSubItems({
+ attachParams,
+ });
+
+ let subscriptions: Stripe.Subscription[] = [];
+ let invoiceIds: string[] = [];
+
+ for (const itemSet of itemSets) {
+ if (itemSet.interval === BillingInterval.OneOff) {
+ continue;
+ }
+
+ const { items } = itemSet;
+
+ try {
+ // Should create 2 subscriptions
+ let subscription = await createStripeSubThroughInvoice({
+ stripeCli,
+ customer,
+ org,
+ items,
+ freeTrial,
+ metadata: itemSet.subMeta,
+ prices: itemSet.prices,
+ });
+
+ subscriptions.push(subscription);
+ invoiceIds.push(subscription.latest_invoice as string);
+ } catch (error: any) {
+ throw error;
+ }
+ }
+
+ // Add product and entitlements to customer
+ const batchInsert = [];
+ for (const product of products) {
+ batchInsert.push(
+ createFullCusProduct({
+ sb,
+ attachParams: attachToInsertParams(attachParams, product),
+ subscriptionIds: subscriptions.map((s) => s.id),
+ subscriptionId:
+ subscriptions.length > 0 ? subscriptions[0].id : undefined,
+ })
+ );
+ }
+ await Promise.all(batchInsert);
+
+ for (const invoiceId of invoiceIds) {
+ try {
+ const invoice = await getStripeExpandedInvoice({
+ stripeCli,
+ stripeInvoiceId: invoiceId,
+ });
+
+ await InvoiceService.createInvoiceFromStripe({
+ sb,
+ stripeInvoice: invoice,
+ internalCustomerId: customer.internal_id,
+ productIds: products.map((p) => p.id),
+ internalProductIds: products.map((p) => p.internal_id),
+ org,
+ });
+ } catch (error) {
+ logger.error("handleBillNowPrices: error retrieving invoice", error);
+ }
greptile
logic: Invoice creation errors are silently caught and logged. This could leave the system in an inconsistent state if invoice creation fails.
```suggestion
} catch (error) {
logger.error("handleBillNowPrices: error retrieving invoice", error);
+ throw new Error(`Failed to create invoice from Stripe: ${error.message}`);
}
```
diff block
+import { SupabaseClient } from "@supabase/supabase-js";
+import { Stripe } from "stripe";
+import { AttachParams } from "../products/AttachParams.js";
+import { FullCusProduct } from "@shared/models/cusModels/cusProductModels.js";
+import {
+ BillingInterval,
+ BillingType,
+ CusEntWithEntitlement,
+ ErrCode,
+ UsagePriceConfig,
+} from "@autumn/shared";
+import { Decimal } from "decimal.js";
+import {
+ getBillingType,
+ getPriceForOverage,
+} from "@/internal/prices/priceUtils.js";
+import { createStripeCli } from "@/external/stripe/utils.js";
+import { CustomerEntitlementService } from "../entitlements/CusEntitlementService.js";
+import { payForInvoice } from "@/external/stripe/stripeInvoiceUtils.js";
+import { getInvoiceExpansion } from "@/external/stripe/stripeInvoiceUtils.js";
+import RecaseError from "@/utils/errorUtils.js";
+import { StatusCodes } from "http-status-codes";
+import { InvoiceService } from "../invoices/InvoiceService.js";
+import { stripeToAutumnInterval } from "tests/utils/stripeUtils.js";
+import { getResetBalancesUpdate } from "../entitlements/groupByUtils.js";
+
+// Add usage to end of cycle
+const addUsageToNextInvoice = async ({
+ intervalToInvoiceItems,
+ intervalToSub,
+ customer,
+ org,
+ logger,
+ sb,
+ attachParams,
+ curCusProduct,
+}: {
+ intervalToInvoiceItems: any;
+ intervalToSub: any;
+ customer: any;
+ org: any;
+ logger: any;
+ sb: SupabaseClient;
+ attachParams: AttachParams;
+ curCusProduct: FullCusProduct;
+}) => {
+ for (const interval in intervalToInvoiceItems) {
+ const itemsToInvoice = intervalToInvoiceItems[interval];
+
+ if (itemsToInvoice.length === 0) {
+ continue;
+ }
+
+ // Add items to invoice
+ const stripeCli = createStripeCli({
+ org: org,
+ env: customer.env,
+ });
+
+ for (const item of itemsToInvoice) {
+ const amount = getPriceForOverage(item.price, item.overage);
+
+ logger.info(
+ ` feature: ${item.feature.id}, overage: ${item.overage}, amount: ${amount}`
+ );
+
+ let relatedSub = intervalToSub[interval];
+ if (!relatedSub) {
+ logger.error(
+ `No sub found for interval: ${interval}, for feature: ${item.feature.id}`
+ );
+
+ // Invoice immediately?
+ continue;
+ }
+
+ // Create invoice item
+ let invoiceItem = {
+ customer: customer.processor.id,
+ currency: org.default_currency,
+ description: `${curCusProduct.product.name} - ${
+ item.feature.name
+ } x ${Math.round(item.usage)}`,
+ price_data: {
+ product: (item.price.config! as UsagePriceConfig).stripe_product_id!,
+ unit_amount: Math.round(amount * 100),
+ currency: org.default_currency,
+ },
+ subscription: relatedSub.id,
+ };
+
+ await stripeCli.invoiceItems.create(invoiceItem);
+
+ // Update cus ent to 0
+ await CustomerEntitlementService.update({
+ sb,
+ id: item.relatedCusEnt!.id,
+ updates: getResetBalancesUpdate({
+ cusEnt: item.relatedCusEnt!,
+ allowance: 0,
+ }),
+ });
+
+ // Update existing cusEnt in attachParams
+ let cusProducts = attachParams.cusProducts;
+ for (const cusProduct of cusProducts!) {
+ for (let i = 0; i < cusProduct.customer_entitlements.length; i++) {
+ let cusEnt = cusProduct.customer_entitlements[i];
+ if (cusEnt.id === item.relatedCusEnt!.id) {
+ let balancesUpdate = getResetBalancesUpdate({
+ cusEnt,
+ allowance: 0,
+ });
+ cusProduct.customer_entitlements[i] = {
+ ...cusEnt,
+ ...balancesUpdate,
+ };
+ }
+ }
+ }
+ }
+ }
+};
+
+const invoiceForUsageImmediately = async ({
+ intervalToInvoiceItems,
+ customer,
+ org,
+ logger,
+ sb,
+ curCusProduct,
+ attachParams,
+ newSubs,
+}: {
+ intervalToInvoiceItems: any;
+ customer: any;
+ org: any;
+ logger: any;
+ sb: SupabaseClient;
+ curCusProduct: FullCusProduct;
+ attachParams: AttachParams;
+ newSubs: Stripe.Subscription[];
+}) => {
+ // 1. Create invoice
+ const stripeCli = createStripeCli({
+ org: org,
+ env: customer.env,
+ });
+
+ // const invoice = await stripeCli.invoices.create({
+ // customer: customer.processor.id,
+ // auto_advance: true,
+ // });
+ let invoice: Stripe.Invoice;
+ let newInvoice = false;
+ if (attachParams.invoiceOnly) {
+ invoice = await stripeCli.invoices.retrieve(
+ newSubs[0].latest_invoice as string
+ );
+
+ if (invoice.status !== "draft") {
+ newInvoice = true;
+ invoice = await stripeCli.invoices.create({
+ customer: customer.processor.id,
+ auto_advance: true,
+ });
+ }
+ } else {
+ newInvoice = true;
+ invoice = await stripeCli.invoices.create({
+ customer: customer.processor.id,
+ auto_advance: true,
+ });
+ }
+
+ // 2. Add items to invoice
+ let invoiceItems = Object.values(intervalToInvoiceItems).flat() as any[];
+ if (invoiceItems.length === 0) {
+ return;
+ }
+
+ for (const item of invoiceItems) {
+ const amount = getPriceForOverage(item.price, item.overage);
+
+ logger.info(
+ ` feature: ${item.feature.id}, overage: ${item.overage}, amount: ${amount}`
+ );
+
+ let invoiceItem = {
+ customer: customer.processor.id,
+ invoice: invoice.id,
+ currency: org.default_currency,
+ description: `${curCusProduct.product.name} - ${
+ item.feature.name
+ } x ${Math.round(item.usage)}`,
+ price_data: {
+ product: (item.price.config! as UsagePriceConfig).stripe_product_id!,
+ unit_amount: Math.round(amount * 100),
+ currency: org.default_currency,
+ },
+ };
+
+ await stripeCli.invoiceItems.create(invoiceItem);
+
+ // // Set cus ent to 0
+ await CustomerEntitlementService.update({
+ sb,
+ id: item.relatedCusEnt!.id,
+ updates: {
+ balance: 0,
+ },
+ });
+ }
+
+ // Finalize and pay invoice
+ const finalizedInvoice = await stripeCli.invoices.finalizeInvoice(
+ invoice.id,
+ getInvoiceExpansion()
+ );
+
+ let curProduct = curCusProduct.product;
+
+ if (newInvoice) {
+ try {
+ await InvoiceService.createInvoiceFromStripe({
+ sb,
+ stripeInvoice: finalizedInvoice,
+ internalCustomerId: customer.internal_id,
+ org: org,
+ productIds: [curProduct.id],
+ internalProductIds: [curProduct.internal_id],
+ });
+ const { paid, error } = await payForInvoice({
+ fullOrg: org,
+ env: customer.env,
+ customer,
+ invoice,
+ logger,
+ });
+
+ if (!paid) {
+ logger.warn("Failed to pay invoice for remaining usages", {
+ stripeInvoice: newInvoice,
+ paymentError: error,
+ });
+ }
+ } catch (error) {}
greptile
logic: The empty catch block silently swallows failures and could leave the system in an inconsistent state
suggested fix
+ } catch (error) {
+ logger.error('Failed to process invoice payment', { error });
}
diff block
-pub mod account_info;
-mod account_locks;
-pub mod accounts;
-pub mod accounts_cache;
-pub mod accounts_db;
-pub mod errors;
-mod persist;
-pub mod verify_accounts_hash_in_background;
-pub use persist::{AccountsPersister, FLUSH_ACCOUNTS_SLOT_FREQ};
-pub mod geyser;
-pub mod utils;
-
-pub const ACCOUNTS_RUN_DIR: &str = "run";
-pub const ACCOUNTS_SNAPSHOT_DIR: &str = "snapshot";
+use std::{path::PathBuf, sync::Arc};
+
+use parking_lot::RwLock;
+use solana_account::{
+ cow::AccountBorrowed, AccountSharedData, ReadableAccount,
+};
+use solana_pubkey::Pubkey;
+
+use config::AdbConfig;
+use error::AdbError;
+use index::AdbIndex;
+use snapshot::SnapshotEngine;
+use storage::AccountsStorage;
+pub type AdbResult<T> = Result<T, AdbError>;
+/// Stop the World Lock, used to halt all writes to adb while
+/// some critical operation is in action, e.g. snapshotting
+pub type StWLock = Arc<RwLock<()>>;
+
+static mut SNAPSHOT_FREQUENCY: u64 = 0;
+
+macro_rules! inspecterr {
+ ($result: expr, $msg: expr) => {
+ $result.inspect_err(|err| eprintln!("adb - {} error: {err}", $msg))?
+ };
+ ($result: expr, $msg: expr, @option) => {
+ $result
+ .inspect_err(|err| eprintln!("adb - {} error: {err}", $msg))
+ .ok()
+ };
+ ($result: expr, $msg: expr, @silent) => {
+ match $result {
+ Ok(v) => v,
+ Err(error) => {
+ eprintln!("adb - {} error: {error}", $msg);
+ return;
+ }
+ }
+ };
+}
+
+#[repr(C)] // perf: storage and cache will be stored in two contigious cache lines
+pub struct AccountsDb {
+ /// Main accounts storage, where actual account records are kept
+ storage: AccountsStorage,
+ /// Index manager, used for various lookup operations
+ index: AdbIndex,
+ /// Snapshots manager, boxed for cache efficiency, as this field is rarely used
+ snap: Box<SnapshotEngine>,
+ /// Stop the world lock, currently used for snapshotting only
+ lock: StWLock,
+}
+
+impl AccountsDb {
+ /// Open or create accounts database
+ pub fn new(config: &AdbConfig, lock: StWLock) -> AdbResult<Self> {
+ inspecterr!(
+ std::fs::create_dir_all(&config.directory),
+ "ensuring existence of accountsdb directory"
+ );
+ let storage =
+ inspecterr!(AccountsStorage::new(config), "storage creation");
+ let index = inspecterr!(AdbIndex::new(config), "index creation");
+ let snap = inspecterr!(
+ SnapshotEngine::new(config.directory.clone(), config.max_snapshots),
+ "snapshot engine creation"
+ );
+ // no need to store global constants in type, this
+ // is the only place it's set, so its use is safe
+ unsafe { SNAPSHOT_FREQUENCY = config.snapshot_frequency };
+ Ok(Self {
+ storage,
+ index,
+ snap,
+ lock,
+ })
+ }
+
+ /// Opens existing database with given snapshot_frequency, used for tests and tools
+ /// most likely you want to use [new](AccountsDb::new) method
+ pub fn open(directory: PathBuf) -> AdbResult<Self> {
+ let config = AdbConfig {
+ directory,
+ snapshot_frequency: u64::MAX,
+ ..Default::default()
+ };
+ Self::new(&config, StWLock::default())
+ }
+
+ pub fn get_account(&self, pubkey: &Pubkey) -> AdbResult<AccountSharedData> {
+ let offset = self.index.get_account_offset(pubkey)?;
+ let memptr = self.storage.offset(offset);
+ let account =
+ unsafe { AccountSharedData::deserialize_from_mmap(memptr) };
+ Ok(account.into())
+ }
+
+ pub fn insert_account(&self, pubkey: &Pubkey, account: &AccountSharedData) {
+ match account {
+ AccountSharedData::Borrowed(acc) => {
+ // this is the beauty of this AccountsDB implementation: when we have Borrowed
+ // variant, we just increment atomic counter (nanosecond op) and that's it,
+ // everything is already written, and new readers will now see the latest update
+ acc.commit();
+ }
+ AccountSharedData::Owned(acc) => {
+ let datalen = account.data().len();
+ // we multiply by 2 for shadow buffer and add extra space for metadata
+ let size = AccountSharedData::serialized_size_aligned(datalen)
+ * 2
+ + AccountSharedData::SERIALIZED_META_SIZE;
+
+ let blocks = self.storage.get_block_count(size);
+ // TODO(bmuddha) perf optimization: `allocation_exists` involves index lock + BTree
+ // search and should ideally be used only when we have enough fragmentation to
+ // increase the chances of finding perfect allocation in recyclable list. We should
+ // utilize `AccountsStorage::fragmentation` to track fragmentation factor and start
+ // recycling only when it exceeds some preconfigured threshold
+ let allocation = match self.index.allocation_exists(blocks) {
+ // if we could recycle some "hole" in database, use it
+ Ok(recycled) => {
+ // bookkeeping for deallocated(free hole) space
+ self.storage.decrement_deallocations(recycled.blocks);
+ self.storage.recycle(recycled)
+ }
+ // otherwise allocate from the end of memory map
+ Err(AdbError::NotFound) => self.storage.alloc(size),
+ Err(other) => {
+ // This can only happen if we have catastrophic system mulfunction
+ log::error!("failed to insert account, index allocation check error: {other}");
+ return;
+ }
+ };
+
+ unsafe {
+ AccountSharedData::serialize_to_mmap(
+ acc,
+ allocation.storage,
+ )
+ };
+ // update accounts index
+ let dealloc = inspecterr!(
+ self.index.insert_account(
+ pubkey,
+ account.owner(),
+ allocation
+ ),
+ "account index insertion",
+ @silent
+ );
greptile
logic: Using `@silent` here silently ignores errors during account index insertion. This could lead to an inconsistent state if the index update fails after the account data has already been written.
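Language aside, the pattern the comment points at is keeping the data write and the index update consistent: if indexing fails, either roll back the write or propagate the error. A TypeScript sketch of that idea with hypothetical `Storage` and `Index` interfaces (not the Rust types above):
```typescript
// Sketch: if indexing fails, release the freshly written record and surface
// the error so readers never see an unindexed account.
interface Storage {
  write(key: string, bytes: Uint8Array): { offset: number };
  release(offset: number): void;
}
interface Index {
  insert(key: string, offset: number): void; // may throw
}

function insertRecord(storage: Storage, index: Index, key: string, bytes: Uint8Array): void {
  const { offset } = storage.write(key, bytes);
  try {
    index.insert(key, offset);
  } catch (err) {
    storage.release(offset); // undo the write instead of silently continuing
    throw err;               // propagate so the caller can react
  }
}
```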
diff block
+import jsforce from "jsforce";
+import { getPreferenceValues, LocalStorage } from "@raycast/api";
+import fs from "fs";
+import path from "path";
+
+interface Preferences {
+ salesforceUrl: string;
+ memoDirectory: string;
+ salesforceObjectType: string;
+ customObjectName: string;
+}
+
+interface SalesforceCredentials {
+ username: string;
+ password: string;
+ securityToken: string;
+}
+
+// Salesforceレコードの属性型
+interface SalesforceAttributes {
+ type?: string;
+ url?: string;
+}
+
+// Salesforceレコードの型
+interface SalesforceRecordData {
+ Id: string;
+ Name: string;
+ attributes?: SalesforceAttributes;
+ [key: string]: unknown;
+}
+
+export interface SalesforceRecord {
+ Id: string;
+ Name: string;
+ Type: string;
+}
+
+// Salesforce接続関連のクラス
+export class SalesforceService {
+ private conn: jsforce.Connection | null = null;
+ private preferences: Preferences;
+
+ constructor() {
+ this.preferences = getPreferenceValues<Preferences>();
+ }
+
+ // 使用するSalesforceオブジェクトの名前を取得
+ private getSalesforceObjectName(): string {
+ const { salesforceObjectType, customObjectName } = this.preferences;
+
+ if (salesforceObjectType === "Custom" && customObjectName) {
+ return customObjectName;
+ }
+
+ return salesforceObjectType || "ContentNote"; // デフォルトはContentNote
+ }
+
+ // ログイン情報を保存
+ async saveCredentials(credentials: SalesforceCredentials): Promise<void> {
+ await LocalStorage.setItem(
+ "salesforce_credentials",
+ JSON.stringify(credentials),
+ );
+ }
+
+ // ログイン情報を取得
+ async getCredentials(): Promise<SalesforceCredentials | null> {
+ const storedCredentials = await LocalStorage.getItem<string>(
+ "salesforce_credentials",
+ );
+ if (!storedCredentials) {
+ return null;
+ }
+ return JSON.parse(storedCredentials);
+ }
+
+ // Salesforceに接続
+ async connect(): Promise<boolean> {
+ const credentials = await this.getCredentials();
+ if (!credentials) {
+ return false;
+ }
+
+ try {
+ this.conn = new jsforce.Connection({
+ loginUrl:
+ this.preferences.salesforceUrl || "https://login.salesforce.com",
+ });
+
+ // UTF-8エンコーディングを使用するように設定
+ if (this.conn && this.conn.request) {
+ // リクエストヘッダーに文字セット情報を追加
+ this.conn.request.headers = {
+ ...this.conn.request.headers,
+ "Accept-Charset": "utf-8",
+ "Content-Type": "application/json; charset=utf-8",
+ };
+
+ // APIバージョン情報をログに出力(デバッグ用)
+ console.log("Salesforce API接続設定完了");
+ }
+
+ // ログイン処理
+ console.log("Salesforceログイン開始");
+ await this.conn.login(
+ credentials.username,
+ credentials.password + (credentials.securityToken || ""),
+ );
+
+ // 接続成功後にログ出力
+ console.log("Salesforceに接続しました。");
+
+ return true;
+ } catch (error) {
+ console.error("Salesforce connection error:", error);
+ return false;
+ }
+ }
+
+ // レコード検索
+ async searchRecords(searchTerm: string): Promise<SalesforceRecord[]> {
+ if (!this.conn) {
+ const connected = await this.connect();
+ if (!connected) {
+ throw new Error("Salesforceに接続できませんでした。");
+ }
+ }
+
+ try {
+ const result = await this.conn!.search(
+ `FIND {${searchTerm}} IN ALL FIELDS RETURNING CS__c(Id, Name), Contact(Id, Name), Opportunity(Id, Name), Lead(Id, Name)`,
+ );
+
+ // 検索結果を統合して返す
+ const records: SalesforceRecord[] = [];
+
+ if (result.searchRecords) {
+ console.log("検索結果:", JSON.stringify(result.searchRecords, null, 2));
+
+ for (const record of result.searchRecords as SalesforceRecordData[]) {
+ // SOSLの結果には各レコードのオブジェクト型を表す属性が含まれているはず
+ console.log("レコード詳細:", JSON.stringify(record, null, 2));
+
+ // 改善: 複数の方法でオブジェクト型を特定
+ let objectType = "";
+
+ // 方法1: AttributesからTypeを取得
+ if (record.attributes && record.attributes.type) {
+ objectType = record.attributes.type;
+ }
+ // 方法2: レコード名にTypeが含まれる属性を探す
+ else {
+ const typeField = Object.keys(record).find((key) =>
+ key.endsWith("Type"),
+ );
+ if (typeField) {
+ objectType = typeField.replace("Type", "");
+ }
+ // 方法3: URLから推測
+ else if (record.attributes && record.attributes.url) {
+ const urlMatch =
+ record.attributes.url.match(/\/sobjects\/(\w+)\//);
+ if (urlMatch && urlMatch[1]) {
+ objectType = urlMatch[1];
+ }
+ }
+ }
+
+ // どの方法でも取得できなかった場合はUnknownとする
+ if (!objectType) {
+ objectType = "Unknown";
+ }
+
+ records.push({
+ Id: record.Id,
+ Name: record.Name,
+ Type: objectType,
+ });
+ }
+ }
+
+ return records;
+ } catch (error) {
+ console.error("Salesforceレコード検索エラー:", error);
+ throw new Error("レコードの検索中にエラーが発生しました。");
+ }
+ }
+
+ // メモレコードを作成
+ async createMemoRecord(
+ subject: string,
+ body: string,
+ relatedRecordId?: string,
+ ): Promise<string> {
+ if (!this.conn) {
+ const connected = await this.connect();
+ if (!connected) {
+ throw new Error("Salesforceに接続できませんでした。");
+ }
+ }
+
+ try {
+ console.log("Salesforceメモ作成開始:", {
+ subject,
+ bodyLength: body.length,
+ relatedRecordId,
+ });
+
+ // 文字化け防止のための処理
+ // タイトルと本文をエンコードして送信
+ const safeSubject = subject.trim();
+ const safeBody = body.trim();
+
+ console.log(`タイトル長: ${Buffer.from(safeSubject).length} バイト`);
+ console.log(`本文長: ${Buffer.from(safeBody).length} バイト`);
+
+ // Salesforceオブジェクトの作成
+ const objectName = this.getSalesforceObjectName();
+ console.log(`${objectName}レコード作成準備`);
+
+ // Salesforceオブジェクトのフィールド
+ const memoData: Record<string, string> = {
+ Title: safeSubject,
+ };
+
+ // 本文をBase64でエンコード(日本語文字の文字化け対策)
+ try {
+ // すべてのメモコンテンツをBase64エンコードして設定
+ console.log("コンテンツをBase64エンコードします");
+ const contentBuffer = Buffer.from(safeBody, "utf8");
+ memoData.Content = contentBuffer.toString("base64");
+ console.log("Base64エンコード完了:", {
+ contentLengthBefore: safeBody.length,
+ contentLengthAfter: memoData.Content.length,
+ });
+ } catch (encodeError) {
+ console.error("Base64エンコードエラー:", encodeError);
+ // エンコードエラーの場合は直接テキストを設定(フォールバック)
+ memoData.Content = safeBody;
+ }
+
+ // Salesforce API実行時の詳細ログ
+ console.log(`${objectName}作成リクエスト準備:`, {
+ title: memoData.Title,
+ contentLength: memoData.Content?.length,
+ });
+
+ // Salesforceレコード作成
+ const result = await this.conn!.sobject(objectName).create(memoData);
+ console.log(`${objectName}作成レスポンス:`, JSON.stringify(result));
+
+ if (result.success) {
+ // 関連レコードがある場合はContentDocumentLinkを作成
+ if (relatedRecordId) {
+ try {
+ // 選択したオブジェクトがContentNoteかContentDocumentの場合のみContentDocumentLinkを作成
+ if (
+ objectName === "ContentNote" ||
+ objectName === "ContentDocument"
+ ) {
+ // ContentDocumentLinkオブジェクトを作成して関連付け
+ console.log("ContentDocumentLink作成開始:", {
+ contentDocumentId: result.id,
+ linkedEntityId: relatedRecordId,
+ });
+
+ const linkData = {
+ ContentDocumentId: result.id,
+ LinkedEntityId: relatedRecordId,
+ ShareType: "V", // V=Viewer
+ };
+
+ const linkResult = await this.conn!.sobject(
+ "ContentDocumentLink",
+ ).create(linkData);
+ console.log(
+ "ContentDocumentLink作成結果:",
+ JSON.stringify(linkResult),
+ );
+
+ if (!linkResult.success) {
+ console.error("関連レコードのリンク作成に失敗:", linkResult);
+ }
greptile
logic: linkResult failure should throw an error since partial success could leave data in an inconsistent state
suggested fix
if (!linkResult.success) {
+ console.error("Failed to create related record link:", linkResult);
+ throw new Error("Failed to create relationship between memo and record");
}
diff block
);
const handleEditRole = useCallback(async () => {
if (editRole) {
- await updateRole(editRole);
+ const res = await updateRole(editRole);
+
+ if (res) {
+ setRoles((prev) => {
+ return prev.map((role) => {
+ if (role.id === editRole.id) {
+ return res.data;
+ }
+ return role;
+ });
+ });
+ }
setEditRole(null);
}
greptile
logic: No error handling if updateRole fails; a failure could leave the UI in an inconsistent state
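A minimal sketch of the handler with error handling, written as a standalone function since the surrounding hook and API client are not shown; the `Role` type, the `clearEditRole` and `onError` callbacks are assumptions:
```typescript
type Role = { id: string; name: string };

// Sketch only: the real hook wires these through React state and the API client.
async function handleEditRoleSafely(
  editRole: Role,
  updateRole: (role: Role) => Promise<{ data: Role } | null>,
  setRoles: (update: (prev: Role[]) => Role[]) => void,
  clearEditRole: () => void,
  onError: (error: unknown) => void,
): Promise<void> {
  try {
    const res = await updateRole(editRole);
    if (!res) throw new Error("updateRole returned no result");
    setRoles((prev) => prev.map((role) => (role.id === editRole.id ? res.data : role)));
    clearEditRole(); // only clear edit state after a confirmed success
  } catch (error) {
    onError(error); // keep the edit state so the user can retry or cancel
  }
}
```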
diff block
)}
</ActionPanel>
}
- />
+ />,
);
});
// Fetch WNBA Stats
const { isLoading: wnbaScheduleStats, data: wnbaScoresAndSchedule } = useFetch<Response>(
- "https://site.api.espn.com/apis/site/v2/sports/basketball/wnba/scoreboard",
+ `https://site.api.espn.com/apis/site/v2/sports/basketball/wnba/scoreboard?dates=${dateRange}`,
);
+
+ const wnbaDayItems: DayItems[] = [];
const wnbaGames = wnbaScoresAndSchedule?.events || [];
- const wnbaItems = wnbaGames.map((wnbaGame, index) => {
+
+ wnbaGames.forEach((wnbaGame, index) => {
+ const gameDate = new Date(wnbaGame.date);
+ const wnbaGameDay = gameDate.toLocaleDateString([], {
+ dateStyle: "medium",
+ });
+
+ if (!wnbaDayItems.find((wnbaDay) => wnbaDay.title === wnbaGameDay)) {
+ wnbaDayItems.push({
+ title: wnbaGameDay,
+ games: [],
+ });
+ }
+
+ const wnbaDay = wnbaDayItems.find((wnbaDay) => wnbaDay.title === wnbaGameDay);
+
const gameTime = new Date(wnbaGame.date).toLocaleTimeString([], {
hour: "2-digit",
minute: "2-digit",
});
let accessoryTitle = gameTime;
let accessoryColor = Color.SecondaryText;
- let accessoryToolTip;
+ let accessoryIcon = { source: Icon.Calendar, tintColor: Color.SecondaryText };
+ let accessoryToolTip = "Scheduled";
+
+ const startingSoonInterval = 15 * 60 * 1000;
+ const currentDate = new Date();
+ const timeUntilGameStarts = gameDate.getTime() - currentDate.getTime();
+
+ if (timeUntilGameStarts <= startingSoonInterval && wnbaGame.status.type.state !== "in") {
greptile
logic: Inconsistent state check between NBA and WNBA: NBA checks for the 'pre' state while WNBA checks for '!== in'. This could cause different behavior.
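One way to keep the two leagues consistent is a shared helper that both code paths call; the game shape below is inferred from the diff and may not match the ESPN response exactly:
```typescript
// Shared "starting soon" rule so NBA and WNBA behave identically.
type ScoreboardGame = { date: string; status: { type: { state: string } } };

const STARTING_SOON_MS = 15 * 60 * 1000;

function isStartingSoon(game: ScoreboardGame, now: Date = new Date()): boolean {
  const startsIn = new Date(game.date).getTime() - now.getTime();
  // Use the explicit pre-game state for both leagues instead of "!== in".
  return startsIn <= STARTING_SOON_MS && game.status.type.state === "pre";
}
```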
diff block
document_id=document.id,
already_existed=indexing_pipeline_result.new_docs > 0,
)
+
+
+@router.delete("/ingestion/{document_id}")
+def delete_ingestion_doc(
+ document_id: str,
+ user: User | None = Depends(api_key_dep),
+ db_session: Session = Depends(get_session),
+) -> DeleteIngestionResult:
+ tenant_id = get_current_tenant_id()
+ if user is None or user.role != UserRole.ADMIN:
+ raise HTTPException(
+ status_code=403, detail="User does not have permission to delete documents"
+ )
+
+ document = get_document(document_id, db_session)
+ if document is None:
+ raise HTTPException(status_code=404, detail="Document not found")
+
+ active_search_settings = get_active_search_settings(db_session)
+ doc_index = get_default_document_index(
+ active_search_settings.primary,
+ active_search_settings.secondary,
+ )
+
+ # A document might have image chunks that its associated with, with
+ # each of these storing an image in the file store. Thus, we have to first
+ # retrieve the chunks, then delete the related image sections from the file store.
+ chunks = doc_index.id_based_retrieval(
+ [VespaChunkRequest(document_id=document.id)],
+ filters=IndexFilters(access_control_list=None, tenant_id=tenant_id),
+ )
+
+ file_store = get_default_file_store(db_session)
+ for chunk in chunks:
+ if chunk.image_file_name:
+ file_store.delete_file(chunk.image_file_name)
+
greptile
logic: File deletion errors are not caught. Consider wrapping file_store.delete_file in error handling so a failure doesn’t leave the deletion process in an inconsistent state.
suggested fix
file_store = get_default_file_store(db_session)
for chunk in chunks:
if chunk.image_file_name:
+ try:
file_store.delete_file(chunk.image_file_name)
+ except Exception as e:
+ logger.error(f"Failed to delete file {chunk.image_file_name}: {e}")
+ raise HTTPException(status_code=500, detail=f"Failed to delete associated files: {str(e)}")
diff block
try {
setUpdatedObjectNamePlural(objectNamePluralForRedirection);
+ if (objectMetadataItem.isCustom) {
+ await updateOneObjectMetadataItem({
+ idToUpdate: objectMetadataItem.id,
+ updatePayload: formValues,
+ });
+
+ formConfig.reset(undefined, { keepValues: true });
+ navigate(SettingsPath.ObjectDetail, {
+ objectNamePlural: objectNamePluralForRedirection,
+ });
+ return;
+ }
+
+ if (
+ formValues.isLabelSyncedWithName === false &&
+ isLabelSyncedWithName === true
+ ) {
greptile
logic: Missing check for when isLabelSyncedWithName changes from false to true, which could leave labels in an inconsistent state
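A very rough sketch of handling both directions of the flag; `computeNameFromLabel`, the payload shape, and the derivation rule are all hypothetical and not the application's actual API:
```typescript
// Sketch: turning the sync back on re-derives the dependent value before saving,
// mirroring the existing true -> false branch.
type ObjectFormValues = {
  label: string;
  nameSingular: string;
  isLabelSyncedWithName: boolean;
};

function computeNameFromLabel(label: string): string {
  return label.trim().toLowerCase().replace(/\s+/g, ""); // placeholder slugifier
}

function buildUpdatePayload(prev: ObjectFormValues, next: ObjectFormValues): ObjectFormValues {
  if (next.isLabelSyncedWithName && !prev.isLabelSyncedWithName) {
    return { ...next, nameSingular: computeNameFromLabel(next.label) };
  }
  return next;
}
```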
diff block
+import { useState, useEffect } from "react";
+import {
+ List,
+ Color,
+ ActionPanel,
+ Action,
+ showToast,
+ Toast,
+ Icon,
+ getPreferenceValues,
+ Application,
+ popToRoot,
+} from "@raycast/api";
+import { showFailureToast } from "@raycast/utils";
+import { fetchDisplays, fetchDisplayStatus, fetchDisplayResolution, fetchMainDisplay, Display } from "./utils";
+import {
+ toggleDisplay,
+ togglePIP,
+ increaseBrightness,
+ decreaseBrightness,
+ increaseContrast,
+ decreaseContrast,
+ availabilityBrightness,
+ availabilityContrast,
+} from "./actions";
+import ResolutionList from "./list-resolutions";
+import events from "./events";
+
+type FilterOption = "all" | "displays" | "virtualScreens";
+
+type DisplayItemProps = {
+ display: Display;
+ status: string;
+ resolution: string;
+ isMain: boolean;
+ onToggle: () => void;
+};
+
+function verifyAppAvailability() {
+ const { betterdisplayApp } = getPreferenceValues<{ betterdisplayApp: Application }>();
+
+ if (betterdisplayApp.name !== "BetterDisplay") {
+ showFailureToast("BetterDisplay app not set", {
+ title: "BetterDisplay app not set",
+ message: "Please set the BetterDisplay app in the extension preferences.",
+ });
+
+ popToRoot();
+ }
+}
+
+function DisplayItem({ display, status, resolution, isMain, onToggle }: DisplayItemProps) {
+ const normalizedStatus = status || "Loading";
+ const statusColor = normalizedStatus.toLowerCase() === "on" ? Color.Green : Color.Red;
+
+ // New state to track availability of brightness and contrast functions.
+ const [brightnessAvailable, setBrightnessAvailable] = useState(false);
+ const [contrastAvailable, setContrastAvailable] = useState(false);
+
+ useEffect(() => {
+ async function checkAvailability() {
+ if (normalizedStatus.toLowerCase() === "on") {
+ const availB = await availabilityBrightness(display.tagID);
+ const availC = await availabilityContrast(display.tagID);
+ setBrightnessAvailable(availB);
+ setContrastAvailable(availC);
+ } else {
+ setBrightnessAvailable(false);
+ setContrastAvailable(false);
+ }
+ }
+ checkAvailability();
+ }, [display.tagID, normalizedStatus]);
+
+ // Helper to wrap actions with toast notifications.
+ async function handleAction(
+ actionFn: () => Promise<string>,
+ successTitle: string,
+ successMessage: string,
+ errorTitle: string,
+ ) {
+ try {
+ const result = await actionFn();
+ await showToast({ title: successTitle, message: result || successMessage, style: Toast.Style.Success });
+ onToggle();
+ } catch (error) {
+ showFailureToast(error, { title: errorTitle });
+ }
+ }
+
+ // Build accessories: always show status; if on, show resolution.
+ const accessories: List.Item.Accessory[] = [{ tag: { value: normalizedStatus, color: statusColor } }];
+ if (normalizedStatus.toLowerCase() === "on" && resolution && resolution !== "Loading") {
+ accessories.unshift({ tag: { value: resolution, color: Color.Blue } });
+ }
+
+ return (
+ <List.Item
+ key={display.tagID}
+ id={display.tagID}
+ title={display.name}
+ subtitle={isMain ? "Main Display" : undefined}
+ accessories={accessories}
+ actions={
+ <ActionPanel>
+ <Action
+ title="Toggle Display"
+ icon={Icon.Power}
+ onAction={() =>
+ handleAction(
+ () => toggleDisplay(display.tagID),
+ "Display toggled",
+ `${display.name} has been toggled.`,
+ "Error toggling display",
+ )
+ }
+ />
+ {normalizedStatus.toLowerCase() === "on" && (
+ <>
+ <Action
+ title="Toggle Pip"
+ icon={Icon.Image}
+ onAction={() =>
+ handleAction(
+ () => togglePIP(display.tagID),
+ "PIP toggled",
+ `${display.name} PIP has been toggled.`,
+ "Error toggling PIP",
+ )
+ }
+ />
+ {/* Only render brightness actions if available */}
+ {brightnessAvailable && (
+ <>
+ <Action
+ title="Increase Brightness"
+ icon={Icon.ArrowUp}
+ shortcut={{ modifiers: ["cmd", "shift"], key: "arrowUp" }}
+ onAction={() =>
+ handleAction(
+ () => increaseBrightness(display.tagID),
+ "Brightness Increased",
+ `${display.name} brightness increased.`,
+ "Error increasing brightness",
+ )
+ }
+ />
+ <Action
+ title="Decrease Brightness"
+ icon={Icon.ArrowDown}
+ shortcut={{ modifiers: ["cmd", "shift"], key: "arrowDown" }}
+ onAction={() =>
+ handleAction(
+ () => decreaseBrightness(display.tagID),
+ "Brightness Decreased",
+ `${display.name} brightness decreased.`,
+ "Error decreasing brightness",
+ )
+ }
+ />
+ </>
+ )}
+ {/* Only render contrast actions if available */}
+ {contrastAvailable && (
+ <>
+ <Action
+ title="Increase Contrast"
+ icon={Icon.CircleProgress50}
+ shortcut={{ modifiers: ["cmd", "opt"], key: "arrowUp" }}
+ onAction={() =>
+ handleAction(
+ () => increaseContrast(display.tagID),
+ "Contrast Increased",
+ `${display.name} contrast increased.`,
+ "Error increasing contrast",
+ )
+ }
+ />
+ <Action
+ title="Decrease Contrast"
+ icon={Icon.Circle}
+ shortcut={{ modifiers: ["cmd", "opt"], key: "arrowDown" }}
+ onAction={() =>
+ handleAction(
+ () => decreaseContrast(display.tagID),
+ "Contrast Decreased",
+ `${display.name} contrast decreased.`,
+ "Error decreasing contrast",
+ )
+ }
+ />
+ </>
+ )}
+ <Action.Push
+ title="Change Resolution"
+ icon={Icon.ArrowsExpand}
+ shortcut={{ modifiers: ["cmd", "shift"], key: "m" }}
+ target={<ResolutionList display={{ tagID: display.tagID, name: display.name }} />}
+ />
+ </>
+ )}
+ </ActionPanel>
+ }
+ />
+ );
+}
+
+export default function ManageDisplays() {
+ verifyAppAvailability();
+
+ const [displays, setDisplays] = useState<Display[]>([]);
+ const [isLoading, setIsLoading] = useState(true);
+ const [statuses, setStatuses] = useState<{ [tagID: string]: string }>({});
+ const [resolutions, setResolutions] = useState<{ [tagID: string]: string }>({});
+ const [filter, setFilter] = useState<FilterOption>("all");
+ const [mainDisplay, setMainDisplay] = useState<Display | null>(null);
+ const [refreshCount, setRefreshCount] = useState(0);
+
+ // Load displays.
+ useEffect(() => {
+ async function loadDisplays() {
+ try {
+ const stdout = await fetchDisplays();
+ const jsonString = stdout ? `[${stdout.trim()}]` : "[]";
+ const data = JSON.parse(jsonString) as Display[];
+ setDisplays(data);
+ } catch (error) {
+ console.error("Failed to load displays", error);
+ } finally {
+ setIsLoading(false);
+ }
+ }
+ loadDisplays();
+ }, []);
+
+ // Load main display.
+ useEffect(() => {
+ async function loadMainDisplay() {
+ const main = await fetchMainDisplay();
+ setMainDisplay(main);
+ }
+ loadMainDisplay();
+ }, []);
greptile
logic: This loadMainDisplay function doesn't have error handling. If fetchMainDisplay fails, it could leave the component in an inconsistent state.
suggested fix
    async function loadMainDisplay() {
      try {
        const main = await fetchMainDisplay();
        setMainDisplay(main);
      } catch (error) {
+       console.error("Failed to load main display", error);
+       // Still set main display to null to ensure consistent state
+       setMainDisplay(null);
      }
    }
    loadMainDisplay();
  }, []);
diff block
+use fdb_util::prelude::*;
+use foundationdb::{self as fdb, options::StreamingMode, tuple::Subspace, FdbBindingError};
+use futures_util::TryStreamExt;
+use global_error::{ensure, unwrap, GlobalResult};
+use sqlx::{
+ migrate::MigrateDatabase,
+ sqlite::{SqliteAutoVacuum, SqliteConnectOptions, SqliteSynchronous},
+ ConnectOptions, Sqlite,
+};
+use std::{
+ collections::HashMap,
+ fmt::Debug,
+ path::{Path, PathBuf},
+ sync::{Arc, Weak},
+ time::Duration,
+};
+use tokio::{
+ sync::{broadcast, Mutex},
+ time::Instant,
+};
+
+use crate::{Error, FdbPool};
+
+mod keys;
+
+#[cfg(test)]
+mod tests;
+
+const GC_INTERVAL: Duration = Duration::from_secs(1);
+const POOL_TTL: Duration = Duration::from_secs(15);
+const CHUNK_SIZE: usize = 10_000; // 10 KB, not KiB, see https://apple.github.io/foundationdb/blob.html
+
+#[derive(Debug, thiserror::Error)]
+enum SqliteFdbError {
+ #[error("mismatched chunk {key_idx}, expected {chunk_count}")]
+ MismatchedChunk { chunk_count: usize, key_idx: usize },
+}
+
+pub type SqlitePool = Arc<SqlitePoolInner>;
+
+pub struct SqlitePoolInner {
+ key_packed: KeyPacked,
+ conn: Arc<Mutex<sqlx::SqliteConnection>>,
+ read_only: bool,
+
+ /// Last time this pool was accessed (either by `get` or a ref was dropped, meaning the query
+ /// ended)
+ last_access: Mutex<Instant>,
+
+ manager: SqlitePoolManagerHandleWeak,
+}
+
+// HACK: Implement mock methods to make this act like an SQLite pool so it can be used with the SQL
+// macros.
+impl SqlitePoolInner {
+ pub fn try_acquire(&self) -> Option<tokio::sync::MutexGuard<'_, sqlx::SqliteConnection>> {
+ self.conn.try_lock().ok()
+ }
+
+ pub async fn acquire(
+ &self,
+ ) -> Result<tokio::sync::MutexGuard<'_, sqlx::SqliteConnection>, sqlx::Error> {
+ Ok(self.conn.lock().await)
+ }
+
+ pub async fn conn(
+ &self,
+ ) -> Result<tokio::sync::MutexGuard<'_, sqlx::SqliteConnection>, sqlx::Error> {
+ Ok(self.conn.lock().await)
+ }
+}
+
+impl SqlitePoolInner {
+ /// Snapshots the database to FDB. Should be called any time you need to be able to restore
+ /// from the DB.
+ pub async fn snapshot(self: &Arc<Self>) -> GlobalResult<()> {
+ let manager = unwrap!(self.manager.upgrade(), "manager is dropped");
+ manager.snapshot_sqlite_db(self).await
+ }
+
+ /// Returns the size of the database file in bytes
+ pub async fn debug_db_size(self: &Arc<Self>) -> GlobalResult<u64> {
+ let manager = unwrap!(self.manager.upgrade(), "manager is dropped");
+ let db_path = manager.build_sqlite_file_path(self.key_packed.clone());
+ let metadata = tokio::fs::metadata(db_path).await.map_err(Error::Io)?;
+ Ok(metadata.len())
+ }
+}
+
+/// DB key in packed form. This is not the full FDB key, this is the DB name segment in DbDataKey.
+type KeyPacked = Arc<Vec<u8>>;
+
+#[derive(Debug, Hash, Eq, PartialEq, Clone)]
+struct Key {
+ key_packed: KeyPacked,
+ read_only: bool,
+}
+
+pub type SqlitePoolManagerHandle = Arc<SqlitePoolManager>;
+pub type SqlitePoolManagerHandleWeak = Weak<SqlitePoolManager>;
+
+enum SqliteStorage {
+ Local,
+ FoundationDb,
+}
+
+pub struct SqlitePoolManager {
+ pools: Arc<Mutex<HashMap<Key, SqlitePool>>>,
+ shutdown: broadcast::Sender<()>,
+ fdb: Option<FdbPool>,
+ storage: SqliteStorage,
+ subspace: Subspace,
+}
+
+impl SqlitePoolManager {
+ pub fn new(fdb: Option<FdbPool>) -> SqlitePoolManagerHandle {
+ let pools = Arc::new(Mutex::new(HashMap::new()));
+ let (shutdown, _) = broadcast::channel(1);
+ let shutdown_rx = shutdown.subscribe();
+ let storage = if std::env::var("_RIVET_POOL_SQLITE_FORCE_LOCAL").map_or(false, |x| x == "1")
+ {
+ SqliteStorage::Local
+ } else {
+ SqliteStorage::FoundationDb
+ };
+
+ let pools_clone = pools.clone();
+ let manager = Arc::new(SqlitePoolManager {
+ pools: pools_clone,
+ shutdown,
+ fdb: fdb.clone(),
+ storage,
+ subspace: Subspace::all().subspace(&("rivet", "sqlite")),
+ });
+
+ tokio::task::spawn(manager.clone().manager_gc_loop(shutdown_rx));
+
+ manager
+ }
+
+ async fn read_from_fdb(&self, key_packed: KeyPacked, db_path: &Path) -> GlobalResult<()> {
+ let db_data_subspace = self
+ .subspace
+ .subspace(&keys::DbDataKey::new(key_packed.clone()));
+
+ let fdb = unwrap!(self.fdb.as_ref());
+ let (data, chunks) = fdb
+ .run(|tx, _mc| {
+ let db_data_subspace = db_data_subspace.clone();
+ async move {
+ // Fetch all chunks
+ let mut data_stream = tx.get_ranges_keyvalues(
+ fdb::RangeOption {
+ mode: StreamingMode::WantAll,
+ ..(&db_data_subspace).into()
+ },
+ false,
+ );
+
+ // Aggregate data
+ let mut buf = Vec::new();
+ let mut chunk_count = 0;
+ while let Some(entry) = data_stream.try_next().await? {
+ // Parse key
+ let key = self
+ .subspace
+ .unpack::<keys::DbDataChunkKey>(entry.key())
+ .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?;
+
+ // Validate chunk
+ if chunk_count != key.chunk {
+ return Err(FdbBindingError::CustomError(
+ SqliteFdbError::MismatchedChunk {
+ chunk_count,
+ key_idx: key.chunk,
+ }
+ .into(),
+ ));
+ }
+ chunk_count += 1;
+
+ // Write to buffer
+ buf.extend(entry.value());
+ }
+
+ Ok::<_, FdbBindingError>((buf, chunk_count))
+ }
+ })
+ .await?;
+
+ if chunks > 0 {
+ tracing::debug!(?chunks, data_len = ?data.len(), "loaded database from fdb");
+ tokio::fs::write(db_path, data).await.map_err(Error::Io)?;
+ } else {
+ tracing::debug!("no sqlite db exists");
+ }
+
+ Ok(())
+ }
+
+ /// Get or creates an sqlite pool for the given key
+ ///
+ /// IMPORTANT: Do not hold a reference to this value for an extended period of time. We use
+ /// this function call to determine when to GC a pool.
+ pub async fn get<K: TuplePack + Debug>(
+ self: &Arc<Self>,
+ key: K,
+ read_only: bool,
+ ) -> Result<SqlitePool, Error> {
+ let mut pools_guard = self.pools.lock().await;
+
+ let key_packed = Arc::new(key.pack_to_vec());
+ let key = Key {
+ key_packed: key_packed.clone(),
+ read_only,
+ };
+
+ // Pool already exists
+ if let Some(entry) = pools_guard.get_mut(&key) {
+ *entry.last_access.lock().await = Instant::now();
+ return Ok(entry.clone());
+ };
+
+ // Load from FDB if enabled
+ let db_path = self.build_sqlite_file_path(key_packed.clone());
+
+ match self.storage {
+ SqliteStorage::Local => {}
+ SqliteStorage::FoundationDb => {
+ self.read_from_fdb(key_packed.clone(), &db_path)
+ .await
+ .map_err(Error::Global)?;
+ }
+ }
+
+ let db_url = format!("sqlite://{}", db_path.display());
+
+ tracing::debug!(?key, ?db_url, "sqlite connecting");
+
+ // Init if doesn't exist
+ if !Sqlite::database_exists(&db_url)
+ .await
+ .map_err(Error::BuildSqlx)?
+ {
+ tracing::debug!(?db_url, "creating sqlite database");
+
+ Sqlite::create_database(&db_url)
+ .await
+ .map_err(Error::BuildSqlx)?;
+ }
+
+ // Connect to database
+ //
+ // We don't need a connection pool since we only have one reader/writer at a time
+ let conn_raw = db_url
+ .parse::<SqliteConnectOptions>()
+ .map_err(Error::BuildSqlx)?
+ .read_only(read_only)
+ // Set synchronous mode to NORMAL for performance and data safety balance
+ .synchronous(SqliteSynchronous::Normal)
+ // Set busy timeout to 5 seconds to avoid "database is locked" errors
+ .busy_timeout(Duration::from_secs(5))
+ // Enable foreign key constraint enforcement
+ .foreign_keys(true)
+ // Enable auto vacuuming and set it to incremental mode for gradual space reclaiming
+ .auto_vacuum(SqliteAutoVacuum::Incremental)
+ // Disable WAL for snapshotting
+ //
+ // Truncate is faster than Delete
+ .journal_mode(sqlx::sqlite::SqliteJournalMode::Truncate)
+ // Force all operations to be flushed to disk
+ //
+ // This impacts performance, but is required in order for snapshot to work
+ .synchronous(sqlx::sqlite::SqliteSynchronous::Full)
+ .connect()
+ .await
+ .map_err(Error::BuildSqlx)?;
+
+ let conn = Arc::new(Mutex::new(conn_raw));
+
+ tracing::debug!(?key, "sqlite connected");
+
+ let pool = Arc::new(SqlitePoolInner {
+ key_packed,
+ conn: conn.clone(),
+ read_only,
+ last_access: Mutex::new(Instant::now()),
+ manager: Arc::downgrade(self),
+ });
+
+ pools_guard.insert(key, pool.clone());
+
+ Ok(pool)
+ }
+
+ /// Evicts a database from the pool and snapshots it if needed
+ pub async fn evict<K: TuplePack + Debug>(
+ self: &Arc<Self>,
+ key: K,
+ read_only: bool,
+ ) -> Result<(), Error> {
+ let key_packed = Arc::new(key.pack_to_vec());
+ let key = Key {
+ key_packed,
+ read_only,
+ };
+
+ let mut pools_guard = self.pools.lock().await;
+
+ self.evict_database_inner(&key, &mut pools_guard)
+ .await
+ .map_err(Error::Global)
+ }
+}
+
+impl SqlitePoolManager {
+ /// Inner implementation of database eviction that handles the actual removal from the pool
+ async fn evict_database_inner(
+ &self,
+ key: &Key,
+ pools_guard: &mut tokio::sync::MutexGuard<'_, HashMap<Key, SqlitePool>>,
+ ) -> GlobalResult<()> {
+ tracing::debug!("evicting sqlite database");
+
+ let entry = unwrap!(
+ pools_guard.get(key),
+ "could not find sqlite pool with key to evict"
+ );
+
+ if !entry.read_only {
+ // Attempt to snapshot before removing
+ self.snapshot_sqlite_db(entry).await?;
+ }
+
+ // Remove the database file if using temporary storage
+ if matches!(self.storage, SqliteStorage::FoundationDb) {
+ let db_path = self.build_sqlite_file_path(key.key_packed.clone());
+ if let Err(err) = tokio::fs::remove_file(&db_path).await {
+ tracing::warn!(?err, ?db_path, "failed to remove sqlite db file");
+ }
+ }
+
+ // Remove from the pools map
+ pools_guard.remove(key);
+
+ Ok(())
+ }
+
+ /// GC loop for SqlitePoolManager
+ async fn manager_gc_loop(self: Arc<Self>, mut shutdown: broadcast::Receiver<()>) {
+ let mut interval = tokio::time::interval(GC_INTERVAL);
+
+ loop {
+ tokio::select! {
+ _ = interval.tick() => {},
+ Ok(_) = shutdown.recv() => {
+ tracing::debug!("shutting down sqlite pool manager");
+ break;
+ }
+ }
+
+ // Anything last used before this instant will be removed
+ let expire_ts = Instant::now() - POOL_TTL;
+
+ // Remove pools
+ {
+ let mut pools_guard = self.pools.lock().await;
+ let mut removed = 0;
+
+ // Find entries to remove
+ let mut to_remove = Vec::new();
+ for (k, v) in pools_guard.iter() {
+ // TODO: Figure out how to do this without a mutex
+ if *v.last_access.lock().await <= expire_ts {
+ // Validate that this is the only reference to the database
+ let ref_count = Arc::strong_count(&v);
+ if ref_count == 1 {
+ to_remove.push(k.clone());
+ } else {
+ tracing::warn!(?ref_count, ?k, "sqlite pool is expired and should have no references, but still has references");
+ }
+ }
+ }
+
+ // Evict each entry
+ for key in to_remove {
+ match self.evict_database_inner(&key, &mut pools_guard).await {
+ Ok(_) => {
+ removed += 1;
+ }
+ Err(err) => {
+ tracing::error!(
+ ?err,
+ ?key,
+ "failed to evict sqlite db, will retry later"
+ );
+ }
+ }
+ }
+
+ tracing::debug!(?removed, total = ?pools_guard.len(), "gc sqlite pools");
+ }
+ }
+ }
+
+ /// Snapshots the current state of a SQLite database to FDB.
+ ///
+ /// This will acquire an exclusive lock on the database to ensure consistency.
+ ///
+ /// We can do this because we don't use WAL (since we don't need concurrent readers/writers).
+ ///
+ /// We don't use `VACUUM FULL` because it requires significant overhead to execute frequently,
+ /// which we don't need since we don't use a WAL.
+ ///
+ /// We don't use the `.backup` command (or `sqlite3_backup_*`) because it still has some
+ /// overhead.
+ async fn snapshot_sqlite_db(&self, entry: &SqlitePool) -> GlobalResult<()> {
+ ensure!(
+ !entry.read_only,
+ "attempting to snapshot a read-only sqlite database"
+ );
+
+ // Only run if snapshotting required
+ let SqliteStorage::FoundationDb = self.storage else {
+ return Ok(());
+ };
+
+ tracing::debug!("snapshotting sqlite database");
+
+ let mut conn = entry.conn.lock().await;
+
+ // Start an IMMEDIATE transaction to prevent other write transactions
+ sqlx::query("BEGIN IMMEDIATE TRANSACTION;")
+ .execute(&mut *conn)
+ .await
+ .map_err(|e| Error::BuildSqlx(e))?;
greptile
logic: Transaction rollback should be handled in a separate function with proper error propagation, as a failed rollback could leave the database in an inconsistent state.
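One possible factoring, sketched here using the file's existing imports and under the assumption of a hypothetical snapshot_inner helper that holds the copy-to-FDB body (not the crate's actual API), is to always attempt an explicit ROLLBACK when that body fails, so the shared connection is never left inside an open IMMEDIATE transaction:

// Sketch only: run the snapshot body and roll back on failure.
async fn snapshot_with_rollback(conn: &mut sqlx::SqliteConnection) -> GlobalResult<()> {
    sqlx::query("BEGIN IMMEDIATE TRANSACTION;")
        .execute(&mut *conn)
        .await
        .map_err(Error::BuildSqlx)?;

    match snapshot_inner(&mut *conn).await {
        Ok(()) => {
            sqlx::query("COMMIT;")
                .execute(&mut *conn)
                .await
                .map_err(Error::BuildSqlx)?;
            Ok(())
        }
        Err(err) => {
            // Log a rollback failure rather than swallowing it, then propagate
            // the original snapshot error.
            if let Err(rollback_err) = sqlx::query("ROLLBACK;").execute(&mut *conn).await {
                tracing::error!(?rollback_err, "rollback failed after snapshot error");
            }
            Err(err)
        }
    }
}

Propagating the rollback error alongside the snapshot error is also reasonable; the essential property is that the connection never stays inside an open transaction after a failed snapshot.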
diff block
const CLIEngine = require('eslint').CLIEngine;
const listChangedFiles = require('../shared/listChangedFiles');
-const allPaths = ['**/*.js'];
+const allPaths = ['**/*.js', '**/*.jsx', '**/*.ts', '**/*.tsx'];
let changedFiles = null;
+let eslintCache = new Map();
-function runESLintOnFilesWithOptions(filePatterns, onlyChanged, options) {
- const cli = new CLIEngine(options);
- const formatter = cli.getFormatter();
+async function runESLintOnFilesWithOptions(filePatterns, onlyChanged, options = {}) {
+ const defaultOptions = {
+ cache: true,
+ cacheLocation: '.eslintcache',
+ fix: false,
+ maxWarnings: 100,
+ ...options
+ };
+
+ const cli = new CLIEngine(defaultOptions);
+ const formatter = cli.getFormatter('stylish');
if (onlyChanged && changedFiles === null) {
- // Calculate lazily.
- changedFiles = [...listChangedFiles()];
+ try {
+ changedFiles = [...await listChangedFiles()];
+ changedFiles.forEach(file => {
+ if (!eslintCache.has(file)) {
+ eslintCache.set(file, null);
+ }
+ });
+ } catch (error) {
+ console.error('Error getting changed files:', error);
+ throw error;
+ }
}
greptile
logic: Cache initialization happens even if listChangedFiles() fails, which could lead to inconsistent state. Move the cache initialization inside the try block after changedFiles is successfully populated.
suggested fix
+ if (onlyChanged && changedFiles === null) {
    try {
+     const files = [...await listChangedFiles()];
+     files.forEach(file => {
        if (!eslintCache.has(file)) {
          eslintCache.set(file, null);
        }
      });
+     changedFiles = files;
    } catch (error) {
      console.error('Error getting changed files:', error);
      throw error;
    }
  }
Want to avoid this bug in your codebase? Try Greptile.