87 examples
Infinite loop
A loop runs continuously without ever ending, freezing the system.
[ FAQ1 ]
What is an infinite loop?
An infinite loop occurs when a program repeatedly executes a loop without a terminating condition being met, causing the program or application to become unresponsive or hang. Typically, loops (such as for loops or while loops) rely on a condition to decide whether to continue iterating or exit. If this condition always remains true or is incorrectly written, the loop never terminates. Infinite loops consume CPU resources indefinitely, causing performance issues and requiring manual interruption or termination of the application.
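As a minimal illustration (not drawn from any of the examples below), here is a Python loop whose condition can never become false:

```python
# Illustrative only: `attempts` is never updated inside the loop, so the
# condition `attempts < 3` stays true forever and the program hangs,
# burning CPU until it is interrupted manually (e.g. with Ctrl+C).
attempts = 0
while attempts < 3:
    print("retrying...")
```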
[ FAQ2 ]
How is an infinite loop created?
Infinite loops are usually created by improper loop conditions or faulty logic within the loop itself. Common scenarios include a while loop with a condition that never changes or always evaluates to true, or a for loop without a proper increment or termination condition. For instance, forgetting to increment a loop variable or misplacing loop control logic can cause the loop to repeat endlessly. To resolve or prevent infinite loops, carefully review loop logic, use debugging tools to monitor loop variables, and ensure loop conditions have a clear and achievable termination point.
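For example, a short Python sketch of the forgotten-increment case and its fix (illustrative, not taken from the diffs below):

```python
items = ["a", "b", "c"]

# Buggy: the loop variable never changes, so `i < len(items)` is always true
# and the first element is printed forever.
#
#   i = 0
#   while i < len(items):
#       print(items[i])
#
# Fixed: the loop variable advances toward the termination condition.
i = 0
while i < len(items):
    print(items[i])
    i += 1
```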
diff block
mod common;
use common::*;
-#[tokio::test(flavor = "multi_thread")]
-async fn fdb_sqlite_nats_driver() {
+#[test]
+fn fdb_sqlite_nats_driver() {
setup_tracing();
- setup_dependencies(true).await;
+ {
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ rt.block_on(setup_dependencies(false));
+ }
+
+ loop {
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ rt.block_on(fdb_sqlite_nats_driver_inner());
+ }
greptile
logic: Infinite loop with no exit condition will cause test to run forever and never complete
suggested fix
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(fdb_sqlite_nats_driver_inner());
diff block
+#
+# This is a picoweb example showing various header parsing modes.
+#
+import ure as re
+import picoweb
+
+
+def index(req, resp):
+ yield from resp.awrite("HTTP/1.0 200 OK\r\n")
+ yield from resp.awrite("Content-Type: text/html\r\n")
+ yield from resp.awrite("\r\n")
+ yield from resp.awrite('<li><a href="mode_parse">header_mode="parse"</a>')
+ yield from resp.awrite('<li><a href="mode_skip">header_mode="skip"</a>')
+ yield from resp.awrite('<li><a href="mode_leave">header_mode="leave"</a>')
+
+
+def headers_parse(req, resp):
+ yield from picoweb.start_response(resp)
+ yield from resp.awrite("<table border='1'>")
+ for h, v in req.headers.items():
+ yield from resp.awrite("<tr><td>%s</td><td>%s</td></tr>\r\n" % (h, v))
+ yield from resp.awrite("</table>")
+
+def headers_skip(req, resp):
+ yield from picoweb.start_response(resp)
+ assert not hasattr(req, "headers")
+ yield from resp.awrite("No <tt>req.headers</tt>.")
+
+def headers_leave(req, resp):
+ yield from picoweb.start_response(resp)
+ assert not hasattr(req, "headers")
+ yield from resp.awrite("Reading headers directly from input request:")
+ yield from resp.awrite("<pre>")
+ while True:
+ l = yield from req.reader.readline()
+ if l == b"\r\n":
+ break
+ yield from resp.awrite(l)
greptile
logic: infinite loop risk if request is malformed and never sends '\r\n' - should add timeout or max line count
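One possible shape for the bounded loop this comment asks for, shown as a sketch that reuses the handler above; MAX_HEADER_LINES is an invented cap, not a picoweb constant:

```python
MAX_HEADER_LINES = 100  # assumed upper bound; not part of the original example

def headers_leave(req, resp):
    yield from picoweb.start_response(resp)
    yield from resp.awrite("Reading headers directly from input request:")
    yield from resp.awrite("<pre>")
    # Bound the loop so a malformed request that never sends "\r\n"
    # (or closes the connection early) cannot hang the handler.
    for _ in range(MAX_HEADER_LINES):
        l = yield from req.reader.readline()
        if not l or l == b"\r\n":
            break
        yield from resp.awrite(l)
```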
diff block
}
}
}
- .frame(
- minHeight: min(maxHeight, contentHeight),
- idealHeight: min(maxHeight, contentHeight),
- maxHeight: maxHeight
- )
- .onAppear {
- updateWindowTopOffset()
- }
- .onReceive(NotificationCenter.default.publisher(for: NSWindow.didMoveNotification)) { _ in
- updateWindowTopOffset()
- }
- .onReceive(NotificationCenter.default.publisher(for: NSWindow.didResizeNotification)) { _ in
- updateWindowTopOffset()
- }
- }
-
- func updateWindowTopOffset() {
- if let window = model.panel {
- let screenHeight = NSScreen.main!.frame.height
- let windowTopY = window.frame.origin.y + window.frame.size.height
- windowTopOffset = screenHeight - windowTopY
- }
}
var heightReader: some View {
GeometryReader { proxy in
Color.clear
.onAppear {
- model.contentHeight = proxy.size.height
+ let oldHeight = realHeight
+ contentHeight = proxy.size.height
+
+ if oldHeight != realHeight {
+ model.adjustPanelSize()
+ }
greptile
logic: potential infinite loop if adjustPanelSize triggers layout updates that change contentHeight
diff block
interval: Optional[IntervalType],
now: datetime,
) -> None:
- self.total_intervals = total_intervals
super().__init__(date_range, team, interval, now)
+ # intervals to look ahead for return event
+ self.lookahead = total_intervals
@staticmethod
- def determine_time_delta(total_intervals: int, period: str) -> timedelta:
- period_map = {
- "minute": timedelta(minutes=1),
- "hour": timedelta(hours=1),
- "day": timedelta(days=1),
- "week": timedelta(weeks=1),
- "month": relativedelta(months=1),
- }
-
- if period.lower() not in period_map:
+ def determine_time_delta(interval: int, period: str) -> timedelta:
+ if period.lower() not in PERIOD_MAP:
raise ValueError(f"Period {period} is unsupported.")
- return period_map[period.lower()] * total_intervals
+ return PERIOD_MAP[period.lower()] * interval
+
+ @cached_property
+ def intervals_between(self):
+ """
+ Number of intervals between date_from and date_to
+ """
+ date_from = self.date_from()
+ delta = PERIOD_MAP[self._interval.lower()]
+
+ intervals = 0
+ while date_from < self.date_to():
+ date_from = date_from + delta
+ intervals += 1
+
greptile
logic: potential infinite loop if date_from >= date_to
suggested fix
intervals = 0
+ if date_from >= self.date_to():
+ return 0
while date_from < self.date_to():
date_from = date_from + delta
intervals += 1
diff block
for {
line, err := reader.ReadString('\n')
if err != nil {
- if err == io.EOF {
- break
+ if err != io.EOF {
+ return nil, err
}
- return nil, err
}
greptile
logic: EOF handling may cause infinite loop - the loop continues even after EOF is encountered
suggested fix
+ for {
+ line, err := reader.ReadString('\n')
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
return nil, err
+ }
diff block
+import { promiseWithResolvers } from "../../utils/promiseWithResolvers";
+import {
+ parsePartialJsonObject,
+ getPartialJsonObjectFieldState,
+} from "../../utils/json/parse-partial-json-object";
+import {
+ ToolCallArgsReader,
+ ToolCallReader,
+ ToolCallResultReader,
+} from "./tool-types";
+import { TypeAtPath, TypePath } from "./type-path-utils";
+
+function getField<T>(obj: T, fieldPath: (string | number)[]): any {
+ let current: any = obj;
+ for (const key of fieldPath) {
+ if (current === undefined || current === null) {
+ return undefined;
+ }
+ current = current[key as string | number];
+ }
+ return current;
+}
+
+interface Handle {
+ update(args: unknown): void;
+ dispose(): void;
+}
+
+class GetHandle<T> implements Handle {
+ private resolve: (value: any) => void;
+ private reject: (reason: unknown) => void;
+ private disposed = false;
+ private fieldPath: (string | number)[];
+
+ constructor(
+ resolve: (value: any) => void,
+ reject: (reason: unknown) => void,
+ fieldPath: (string | number)[],
+ ) {
+ this.resolve = resolve;
+ this.reject = reject;
+ this.fieldPath = fieldPath;
+ }
+
+ update(args: unknown): void {
+ if (this.disposed) return;
+
+ try {
+ // Check if the field is complete
+ if (
+ getPartialJsonObjectFieldState(
+ args as Record<string, unknown>,
+ this.fieldPath,
+ ) === "complete"
+ ) {
+ const value = getField(args as T, this.fieldPath);
+ if (value !== undefined) {
+ this.resolve(value);
+ this.dispose();
+ }
+ }
+ } catch (e) {
+ this.reject(e);
+ this.dispose();
+ }
+ }
+
+ dispose(): void {
+ this.disposed = true;
+ }
+}
+
+class StreamValuesHandle<T> implements Handle {
+ private controller: ReadableStreamDefaultController<any>;
+ private disposed = false;
+ private fieldPath: (string | number)[];
+ private lastValue: any = undefined;
+
+ constructor(
+ controller: ReadableStreamDefaultController<any>,
+ fieldPath: (string | number)[],
+ ) {
+ this.controller = controller;
+ this.fieldPath = fieldPath;
+ }
+
+ update(args: unknown): void {
+ if (this.disposed) return;
+
+ try {
+ const value = getField(args as T, this.fieldPath);
+
+ // Only emit if the value is defined and different from the last emitted value
+ if (
+ value !== undefined &&
+ JSON.stringify(value) !== JSON.stringify(this.lastValue)
+ ) {
+ this.lastValue = value;
+ this.controller.enqueue(value);
+ }
+
+ // Check if the field is complete, if so close the stream
+ if (
+ getPartialJsonObjectFieldState(
+ args as Record<string, unknown>,
+ this.fieldPath,
+ ) === "complete"
+ ) {
+ // Ensure we make a final emit if needed
+ if (
+ value !== undefined &&
+ JSON.stringify(value) !== JSON.stringify(this.lastValue)
+ ) {
+ this.controller.enqueue(value);
+ }
+
+ this.controller.close();
+ this.dispose();
+ }
+ } catch (e) {
+ this.controller.error(e);
+ this.dispose();
+ }
+ }
+
+ dispose(): void {
+ this.disposed = true;
+ }
+}
+
+class ForEachHandle<T> implements Handle {
+ private controller: ReadableStreamDefaultController<any>;
+ private disposed = false;
+ private fieldPath: (string | number)[];
+ private processedIndexes = new Set<number>();
+
+ constructor(
+ controller: ReadableStreamDefaultController<any>,
+ fieldPath: (string | number)[],
+ ) {
+ this.controller = controller;
+ this.fieldPath = fieldPath;
+ }
+
+ update(args: unknown): void {
+ if (this.disposed) return;
+
+ try {
+ const array = getField(args as T, this.fieldPath) as unknown as any[];
+
+ if (!Array.isArray(array)) {
+ return;
+ }
+
+ // Check each array element and emit completed ones that haven't been processed
+ for (let i = 0; i < array.length; i++) {
+ if (!this.processedIndexes.has(i)) {
+ const elementPath = [...this.fieldPath, i];
+ if (
+ getPartialJsonObjectFieldState(
+ args as Record<string, unknown>,
+ elementPath,
+ ) === "complete"
+ ) {
+ this.controller.enqueue(array[i]);
+ this.processedIndexes.add(i);
+ }
+ }
+ }
+
+ // Check if the entire array is complete
+ if (
+ getPartialJsonObjectFieldState(
+ args as Record<string, unknown>,
+ this.fieldPath,
+ ) === "complete"
+ ) {
+ this.controller.close();
+ this.dispose();
+ }
+ } catch (e) {
+ this.controller.error(e);
+ this.dispose();
+ }
+ }
+
+ dispose(): void {
+ this.disposed = true;
+ }
+}
+
+// Implementation of ToolCallReader that uses stream of partial JSON
+export class ToolCallArgsReaderImpl<T> implements ToolCallArgsReader<T> {
+ private argTextDeltas: ReadableStream<string>;
+ private handles: Set<Handle> = new Set();
+ private args: any = parsePartialJsonObject("");
+
+ constructor(argTextDeltas: ReadableStream<string>) {
+ this.argTextDeltas = argTextDeltas;
+ this.processStream();
+ }
+
+ private async processStream(): Promise<void> {
+ try {
+ let accumulatedText = "";
+ const reader = this.argTextDeltas.getReader();
+
+ while (true) {
+ const { value, done } = await reader.read();
+ if (done) break;
+
greptile
logic: infinite loop if reader.read() rejects - should release reader in finally block
suggested fix
const reader = this.argTextDeltas.getReader();
try {
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
diff block
import { contextStoreCurrentViewIdComponentState } from '@/context-store/states/contextStoreCurrentViewIdComponentState';
-import { useLastVisitedObjectMetadataItem } from '@/navigation/hooks/useLastVisitedObjectMetadataItem';
-import { useLastVisitedView } from '@/navigation/hooks/useLastVisitedView';
-import { useFilteredObjectMetadataItems } from '@/object-metadata/hooks/useFilteredObjectMetadataItems';
import { useRecoilComponentStateV2 } from '@/ui/utilities/state/component-state/hooks/useRecoilComponentStateV2';
-import { useSetRecoilComponentStateV2 } from '@/ui/utilities/state/component-state/hooks/useSetRecoilComponentStateV2';
-import { useViewFromQueryParams } from '@/views/hooks/internal/useViewFromQueryParams';
-import { useGetCurrentView } from '@/views/hooks/useGetCurrentView';
+import { useRecoilComponentValueV2 } from '@/ui/utilities/state/component-state/hooks/useRecoilComponentValueV2';
import { currentViewIdComponentState } from '@/views/states/currentViewIdComponentState';
-import { isUndefined } from '@sniptt/guards';
import { useEffect } from 'react';
import { isDefined } from 'twenty-shared';
-import { isDeeplyEqual } from '~/utils/isDeeplyEqual';
-
-export const QueryParamsViewIdEffect = () => {
- const { getFiltersFromQueryParams, viewIdQueryParam } =
- useViewFromQueryParams();
-
- // TODO: fix this implicit hack
- const { instanceId: objectNamePlural } = useGetCurrentView();
+// TODO: This whole code should be removed. currentViewId should be used directly to set the mainContextStore
+// and viewbar / view tooling should be updated to use that state contextStore state directly.
+export const QueryParamsViewIdEffect = ({
+ objectNamePlural,
+}: {
+ objectNamePlural: string;
+}) => {
const [currentViewId, setCurrentViewId] = useRecoilComponentStateV2(
currentViewIdComponentState,
);
- const { viewsOnCurrentObject } = useGetCurrentView();
- const { findObjectMetadataItemByNamePlural } =
- useFilteredObjectMetadataItems();
- const objectMetadataItemId =
- findObjectMetadataItemByNamePlural(objectNamePlural);
- const { getLastVisitedViewIdFromObjectNamePlural, setLastVisitedView } =
- useLastVisitedView();
- const { lastVisitedObjectMetadataItemId, setLastVisitedObjectMetadataItem } =
- useLastVisitedObjectMetadataItem();
-
- const lastVisitedViewId =
- getLastVisitedViewIdFromObjectNamePlural(objectNamePlural);
- const isLastVisitedObjectMetadataItemDifferent = !isDeeplyEqual(
- objectMetadataItemId?.id,
- lastVisitedObjectMetadataItemId,
- );
- const setContextStoreCurrentViewId = useSetRecoilComponentStateV2(
+ const contextStoreCurrentViewId = useRecoilComponentValueV2(
contextStoreCurrentViewIdComponentState,
+ objectNamePlural,
);
- // // TODO: scope view bar per view id if possible
- // const { resetCurrentView } = useResetCurrentView();
-
- // useEffect(() => {
- // if (isDefined(currentViewId)) {
- // resetCurrentView();
- // }
- // }, [resetCurrentView, currentViewId]);
-
useEffect(() => {
- const indexView = viewsOnCurrentObject.find((view) => view.key === 'INDEX');
-
- if (isUndefined(viewIdQueryParam) && isDefined(lastVisitedViewId)) {
- if (isLastVisitedObjectMetadataItemDifferent) {
- setLastVisitedObjectMetadataItem(objectNamePlural);
- setLastVisitedView({
- objectNamePlural,
- viewId: lastVisitedViewId,
- });
- }
- setCurrentViewId(lastVisitedViewId);
- setContextStoreCurrentViewId(lastVisitedViewId);
- return;
- }
-
- if (isDefined(viewIdQueryParam)) {
- if (isLastVisitedObjectMetadataItemDifferent) {
- setLastVisitedObjectMetadataItem(objectNamePlural);
- }
- if (!isDeeplyEqual(viewIdQueryParam, lastVisitedViewId)) {
- setLastVisitedView({
- objectNamePlural,
- viewId: viewIdQueryParam,
- });
+ if (isDefined(contextStoreCurrentViewId)) {
+ if (currentViewId !== contextStoreCurrentViewId) {
+ console.log('setting currentViewId', contextStoreCurrentViewId);
+ setCurrentViewId(contextStoreCurrentViewId);
}
- setCurrentViewId(viewIdQueryParam);
- setContextStoreCurrentViewId(viewIdQueryParam);
- return;
}
-
- if (isDefined(indexView)) {
- if (isLastVisitedObjectMetadataItemDifferent) {
- setLastVisitedObjectMetadataItem(objectNamePlural);
- }
- if (!isDeeplyEqual(indexView.id, lastVisitedViewId)) {
- setLastVisitedView({
- objectNamePlural,
- viewId: indexView.id,
- });
- }
- setCurrentViewId(indexView.id);
- setContextStoreCurrentViewId(indexView.id);
- return;
- }
-
- return () => {
- setContextStoreCurrentViewId(null);
- };
- }, [
- currentViewId,
- getFiltersFromQueryParams,
- isLastVisitedObjectMetadataItemDifferent,
- lastVisitedViewId,
- objectMetadataItemId?.id,
- objectNamePlural,
- setContextStoreCurrentViewId,
- setCurrentViewId,
- setLastVisitedObjectMetadataItem,
- setLastVisitedView,
- viewIdQueryParam,
- viewsOnCurrentObject,
- ]);
+ }, [contextStoreCurrentViewId, currentViewId, setCurrentViewId]);
greptile
logic: this effect could cause an infinite loop if both states keep updating each other - consider adding a flag to prevent circular updates
diff block
}
},
overrideVariableValue: () => {
- actions.loadDashboard({ action: 'preview' })
+ if (values.initialVariablesLoaded) {
+ actions.loadDashboard({ action: 'preview' })
+ }
+ },
greptile
logic: Potential infinite loop if initialVariablesLoaded is true but variables haven't actually been applied yet
diff block
echo "Waiting for services to be healthy..."
# Wait for PostgreSQL to be healthy
-while ! docker compose ps postgres | grep "healthy" > /dev/null; do
+while ! sudo docker-compose ps postgres | grep "healthy" > /dev/null; do
echo "Waiting for PostgreSQL..."
sleep 5
done
greptile
style: Consider adding a timeout to prevent infinite loop if PostgreSQL never becomes healthy
suggested fix
+MAX_RETRIES=60 # 5 minutes timeout
+RETRIES=0
while ! sudo docker-compose ps postgres | grep "healthy" > /dev/null; do
+ echo "Waiting for PostgreSQL..."
+ sleep 5
+ RETRIES=$((RETRIES + 1))
+ if [ $RETRIES -ge $MAX_RETRIES ]; then
+ echo "Timeout waiting for PostgreSQL to become healthy"
+ exit 1
+ fi
+done
diff block
+use std::{fmt, result::Result::Ok, str::FromStr};
+
+use anyhow::*;
+use clap::ValueEnum;
+use foundationdb::tuple::{PackResult, TupleDepth, TuplePack, TupleUnpack, VersionstampOffset};
+use rivet_term::console::style;
+use uuid::Uuid;
+
+use crate::util::format::colored_json;
+
+#[derive(Debug, ValueEnum, Clone, Copy, PartialEq)]
+pub enum ListStyle {
+ List,
+ Tree,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum SimpleTupleValue {
+ U64(u64),
+ I64(i64),
+ F64(f64),
+ Uuid(Uuid),
+ String(String),
+ Bytes(Vec<u8>),
+}
+
+impl SimpleTupleValue {
+ fn parse(value: &str) -> Self {
+ if let Ok(v) = value.parse::<u64>() {
+ SimpleTupleValue::U64(v)
+ } else if let Ok(v) = value.parse::<i64>() {
+ SimpleTupleValue::I64(v)
+ } else if let Ok(v) = value.parse::<f64>() {
+ SimpleTupleValue::F64(v)
+ } else if let Ok(v) = Uuid::from_str(value) {
+ SimpleTupleValue::Uuid(v)
+ } else {
+ SimpleTupleValue::String(unescape(value))
+ }
+ }
+}
+
+impl fmt::Display for SimpleTupleValue {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self {
+ SimpleTupleValue::U64(v) => write!(f, "{}", style(v).cyan()),
+ SimpleTupleValue::I64(v) => write!(f, "{}", style(v).magenta()),
+ SimpleTupleValue::F64(v) => write!(f, "{}", style(v).red()),
+ SimpleTupleValue::Uuid(v) => write!(f, "{}", style(v).blue()),
+ SimpleTupleValue::String(v) => {
+ if v.is_empty() {
+ write!(f, "{}", style("<empty>").dim())
+ } else {
+ write!(f, "{}", style(v).green())
+ }
+ }
+ SimpleTupleValue::Bytes(v) => write!(f, "{:?}", style(v).italic()),
+ }
+ }
+}
+
+impl TuplePack for SimpleTupleValue {
+ fn pack<W: std::io::Write>(
+ &self,
+ w: &mut W,
+ tuple_depth: TupleDepth,
+ ) -> std::io::Result<VersionstampOffset> {
+ match self {
+ SimpleTupleValue::U64(v) => v.pack(w, tuple_depth),
+ SimpleTupleValue::I64(v) => v.pack(w, tuple_depth),
+ SimpleTupleValue::F64(v) => v.pack(w, tuple_depth),
+ SimpleTupleValue::Uuid(v) => v.pack(w, tuple_depth),
+ SimpleTupleValue::String(v) => v.pack(w, tuple_depth),
+ SimpleTupleValue::Bytes(v) => {
+ w.write_all(v)?;
+ Ok(VersionstampOffset::None {
+ size: u32::try_from(v.len())
+ .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?,
+ })
+ }
+ }
+ }
+}
+
+impl<'de> TupleUnpack<'de> for SimpleTupleValue {
+ fn unpack(input: &[u8], tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> {
+ if let Ok((input, v)) = <i64>::unpack(input, tuple_depth) {
+ let v = SimpleTupleValue::I64(v);
+ Ok((input, v))
+ } else if let Ok((input, v)) = <f64>::unpack(input, tuple_depth) {
+ let v = SimpleTupleValue::F64(v);
+ Ok((input, v))
+ } else if let Ok((input, v)) = <Uuid>::unpack(input, tuple_depth) {
+ let v = SimpleTupleValue::Uuid(v);
+ Ok((input, v))
+ } else if let Ok((input, v)) = <String>::unpack(input, tuple_depth) {
+ let v = SimpleTupleValue::String(v);
+ Ok((input, v))
+ } else {
+ let v = SimpleTupleValue::Bytes(input.to_vec());
+ Ok((&input[0..0], v))
+ }
+ }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum SimpleValue {
+ U64(u64),
+ F64(f64),
+ I64(i64),
+ Uuid(Uuid),
+ Json(serde_json::Value),
+ String(String),
+ Bytes(Vec<u8>),
+}
+
+impl SimpleValue {
+ pub fn parse_bytes(type_hint: Option<&str>, value: &[u8]) -> Result<Self> {
+ let parsed_value = match type_hint {
+ Some("u64") => SimpleValue::U64(u64::from_be_bytes(
+ value
+ .try_into()
+ .with_context(|| format!("Could not parse `{value:?}` as u64"))?,
+ )),
+ Some("i64") => SimpleValue::I64(i64::from_be_bytes(
+ value
+ .try_into()
+ .with_context(|| format!("Could not parse `{value:?}` as i64"))?,
+ )),
+ Some("f64") => SimpleValue::F64(f64::from_be_bytes(
+ value
+ .try_into()
+ .with_context(|| format!("Could not parse `{value:?}` as f64"))?,
+ )),
+ Some("uuid") => Uuid::from_slice(value)
+ .map(SimpleValue::Uuid)
+ .with_context(|| format!("Could not parse `{value:?}` as UUID"))?,
+ Some("json") => {
+ let s = std::str::from_utf8(value)
+ .with_context(|| format!("Could not parse `{value:?}` as JSON"))?;
+ let v = serde_json::from_str::<serde_json::Value>(s)
+ .with_context(|| format!("Could not parse `{value:?}` as JSON"))?;
+ SimpleValue::Json(v)
+ }
+ Some("str") => std::str::from_utf8(value)
+ .map(|x| x.to_string())
+ .map(SimpleValue::String)
+ .with_context(|| format!("Could not parse `{value:?}` as string"))?,
+ Some("bytes") | Some("b") => SimpleValue::Bytes(value.to_vec()),
+ Some(type_hint) => bail!("unknown type: `{type_hint}`"),
+ _ => {
+ if let Ok(value) = value.try_into() {
+ SimpleValue::I64(i64::from_be_bytes(value))
+ } else if let Ok(v) = Uuid::from_slice(value) {
+ SimpleValue::Uuid(v)
+ } else if let Ok(v) = serde_json::from_slice(value) {
+ SimpleValue::Json(v)
+ } else if let Ok(v) = std::str::from_utf8(value) {
+ SimpleValue::String(v.to_string())
+ } else {
+ SimpleValue::Bytes(value.to_vec())
+ }
+ }
+ };
+
+ Ok(parsed_value)
+ }
+
+ pub fn parse_str(type_hint: Option<&str>, value: &str) -> Result<Self> {
+ let mut escaped = false;
+
+ let mut chars = value.chars().enumerate();
+
+ let (type_hint, value) = if type_hint.is_some() {
+ (type_hint, value)
+ } else {
+ let prefix_end_idx = loop {
+ let Some((i, c)) = chars.next() else {
+ break None;
+ };
+
+ match c {
+ '\\' => escaped = !escaped,
+ ':' if !escaped => break Some(i),
+ _ => escaped = false,
+ }
+ };
+
+ let type_hint = prefix_end_idx.map(|x| &value[..x]);
+ let value = &value[prefix_end_idx.map(|x| x + 1).unwrap_or_default()..];
+
+ (type_hint, value)
+ };
+
+ let parsed_value = match type_hint {
+ Some("u64") => value
+ .parse::<u64>()
+ .map(SimpleValue::U64)
+ .with_context(|| format!("Could not parse `{value}` as u64"))?,
+ Some("i64") => value
+ .parse::<i64>()
+ .map(SimpleValue::I64)
+ .with_context(|| format!("Could not parse `{value}` as i64"))?,
+ Some("f64") => value
+ .parse::<f64>()
+ .map(SimpleValue::F64)
+ .with_context(|| format!("Could not parse `{value}` as f64"))?,
+ Some("uuid") => Uuid::from_str(value)
+ .map(SimpleValue::Uuid)
+ .with_context(|| format!("Could not parse `{value}` as UUID"))?,
+ Some("json") => {
+ let v = serde_json::from_str::<serde_json::Value>(value)
+ .with_context(|| format!("Could not parse `{value}` as JSON"))?;
+ let s = serde_json::to_string(&v)
+ .with_context(|| format!("Could not parse `{value}` as JSON"))?;
+ SimpleValue::String(s)
+ }
+ Some("str") => SimpleValue::String(value.to_string()),
+ Some("bytes") | Some("b") => {
+ let bytes = hex::decode(value.as_bytes())
+ .with_context(|| format!("Could not parse `{value:?}` as hex encoded bytes"))?;
+ SimpleValue::Bytes(bytes)
+ }
+ Some(type_hint) => bail!("unknown type: `{type_hint}`"),
+ _ => SimpleTupleValue::parse(value).into(),
+ };
+
+ Ok(parsed_value)
+ }
+
+ pub fn serialize(&self) -> Result<Vec<u8>> {
+ let v = match self {
+ SimpleValue::U64(v) => v.to_be_bytes().to_vec(),
+ SimpleValue::I64(v) => v.to_be_bytes().to_vec(),
+ SimpleValue::F64(v) => v.to_be_bytes().to_vec(),
+ SimpleValue::Uuid(v) => v.as_bytes().to_vec(),
+ SimpleValue::Json(v) => serde_json::to_vec(v)?,
+ SimpleValue::String(v) => v.as_bytes().to_vec(),
+ SimpleValue::Bytes(v) => v.clone(),
+ };
+
+ Ok(v)
+ }
+}
+
+impl fmt::Display for SimpleValue {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self {
+ SimpleValue::U64(v) => write!(f, "{}", style(v).cyan()),
+ SimpleValue::I64(v) => write!(f, "{}", style(v).magenta()),
+ SimpleValue::F64(v) => write!(f, "{}", style(v).red()),
+ SimpleValue::Uuid(v) => write!(f, "{}", style(v).blue()),
+ SimpleValue::Json(v) => {
+ if let Ok(json) = colored_json(v) {
+ write!(f, "{json}")
+ } else {
+ write!(f, "{}", style(v).yellow())
+ }
+ }
+ SimpleValue::String(v) => write!(f, "{}", style(v).green()),
+ SimpleValue::Bytes(v) => write!(f, "{:?}", style(v).italic()),
+ }
+ }
+}
+
+impl From<SimpleTupleValue> for SimpleValue {
+ fn from(value: SimpleTupleValue) -> Self {
+ match value {
+ SimpleTupleValue::U64(v) => SimpleValue::U64(v),
+ SimpleTupleValue::I64(v) => SimpleValue::I64(v),
+ SimpleTupleValue::F64(v) => SimpleValue::F64(v),
+ SimpleTupleValue::Uuid(v) => SimpleValue::Uuid(v),
+ SimpleTupleValue::String(v) => SimpleValue::String(v),
+ SimpleTupleValue::Bytes(v) => SimpleValue::Bytes(v),
+ }
+ }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct SimpleTupleSegment {
+ value: SimpleTupleValue,
+}
+
+impl SimpleTupleSegment {
+ fn parse(prefix: Option<&str>, value: &str) -> Result<Self> {
+ let parsed_value = match prefix {
+ Some("u64") => value
+ .parse::<u64>()
+ .map(SimpleTupleValue::U64)
+ .with_context(|| format!("Could not parse `{value}` as u64"))?,
+ Some("i64") => value
+ .parse::<i64>()
+ .map(SimpleTupleValue::I64)
+ .with_context(|| format!("Could not parse `{value}` as i64"))?,
+ Some("f64") => value
+ .parse::<f64>()
+ .map(SimpleTupleValue::F64)
+ .with_context(|| format!("Could not parse `{value}` as f64"))?,
+ Some("uuid") => Uuid::from_str(value)
+ .map(SimpleTupleValue::Uuid)
+ .with_context(|| format!("Could not parse `{value}` as UUID"))?,
+ Some("str") => SimpleTupleValue::String(value.to_string()),
+ Some(prefix) => bail!("unknown type: `{prefix}`"),
+ _ => SimpleTupleValue::parse(value),
+ };
+
+ Ok(SimpleTupleSegment {
+ value: parsed_value,
+ })
+ }
+}
+
+impl fmt::Display for SimpleTupleSegment {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.value)
+ }
+}
+
+impl TuplePack for SimpleTupleSegment {
+ fn pack<W: std::io::Write>(
+ &self,
+ w: &mut W,
+ tuple_depth: TupleDepth,
+ ) -> std::io::Result<VersionstampOffset> {
+ self.value.pack(w, tuple_depth)
+ }
+}
+
+impl<'de> TupleUnpack<'de> for SimpleTupleSegment {
+ fn unpack(input: &[u8], tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> {
+ let (input, v) = SimpleTupleValue::unpack(input, tuple_depth)?;
+ let v = SimpleTupleSegment { value: v };
+
+ Ok((input, v))
+ }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct SimpleTuple {
+ pub segments: Vec<SimpleTupleSegment>,
+}
+
+impl SimpleTuple {
+ pub fn new() -> Self {
+ SimpleTuple {
+ segments: Vec::new(),
+ }
+ }
+
+ pub fn slice(&self, n: usize) -> Self {
+ SimpleTuple {
+ segments: self.segments.iter().take(n).cloned().collect(),
+ }
+ }
+
+ pub fn parse(value: &str) -> Result<(Self, bool, usize)> {
+ let mut segments = Vec::new();
+ let mut back_count = 0;
+ let mut normal_segment_encountered = false;
+ let mut start = 0;
+ let mut prefix = None;
+ let mut escaped = false;
+
+ let chars = value
+ .chars()
+ .chain((!value.ends_with('/')).then_some('/'))
+ .enumerate();
+ for (i, c) in chars {
+ match c {
+ '/' => {
+ if i > start {
+ let segment = &value[start..i];
+
+ // Parse back
+ if segment == ".." {
+ if normal_segment_encountered {
+ bail!("Invalid path: '..' cannot go after other segments");
+ }
+
+ back_count += 1;
+ } else if segment == "." {
+ // Noop
+ } else {
+ // Parse normal segment
+ normal_segment_encountered = true;
+ segments
+ .push(SimpleTupleSegment::parse(prefix.take(), segment.trim())?);
+ }
+ } else if start != 0 && i == start {
+ segments.push(SimpleTupleSegment::parse(prefix.take(), "")?);
+ }
+
+ start = i + 1;
+ }
+ '\\' => escaped = !escaped,
+ ':' if !escaped && prefix.is_none() => {
+ prefix = Some(&value[start..i]);
+ start = i + 1;
+ }
+ _ => escaped = false,
+ }
+ }
+
+ Ok((
+ SimpleTuple { segments },
+ !value.starts_with('/'),
+ back_count,
+ ))
+ }
+
+ pub fn print(&self, list_style: &ListStyle, last_key: &SimpleTuple) {
+ match list_style {
+ ListStyle::List => {
+ print!(" {self}");
+ }
+ ListStyle::Tree => {
+ let common_prefix_len = self
+ .segments
+ .iter()
+ .zip(&last_key.segments)
+ .take_while(|(a, b)| a == b)
+ .count();
+
+ for (i, segment) in self.segments.iter().skip(common_prefix_len).enumerate() {
+ print!(
+ " {}/{}",
+ style("| ".repeat(common_prefix_len + i)).dim(),
+ segment
+ );
+ if i != self.segments.len() - common_prefix_len - 1 {
+ println!();
+ }
+ }
+ }
+ };
+ }
+}
+
+impl fmt::Display for SimpleTuple {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.segments.is_empty() {
+ write!(f, "/")?;
+ } else {
+ for segment in &self.segments {
+ write!(f, "/{segment}")?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl TuplePack for SimpleTuple {
+ fn pack<W: std::io::Write>(
+ &self,
+ w: &mut W,
+ tuple_depth: TupleDepth,
+ ) -> std::io::Result<VersionstampOffset> {
+ let mut offset = VersionstampOffset::None { size: 0 };
+
+ for segment in &self.segments {
+ offset += segment.pack(w, tuple_depth)?;
+ }
+
+ Ok(offset)
+ }
+}
+
+impl<'de> TupleUnpack<'de> for SimpleTuple {
+ fn unpack(input: &[u8], tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> {
+ let mut input = input;
+ let mut segments = Vec::new();
+
+ loop {
+ let (i, v) = SimpleTupleSegment::unpack(input, tuple_depth)?;
+ input = i;
+ segments.push(v);
+
+ if input.is_empty() {
+ break;
+ }
+ }
greptile
logic: Infinite loop possible if unpack fails without consuming input
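The guard this comment implies is language-agnostic: make sure each iteration consumes input, and bail out if it does not. A rough Python sketch of that shape (illustrative only, not the Rust crate's API):

```python
def unpack_all(data: bytes, unpack_one):
    """Repeatedly unpack tuple segments, guarding against a parser that makes no progress."""
    segments = []
    remaining = data
    while remaining:
        rest, value = unpack_one(remaining)  # mirrors the (input, value) return shape above
        if len(rest) >= len(remaining):
            # The parser consumed no bytes; bail out instead of looping forever.
            raise ValueError("unpacker made no progress")
        segments.append(value)
        remaining = rest
    return segments
```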
diff block
+from datetime import datetime
+from io import BytesIO
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+
+from onyx.configs.app_configs import INDEX_BATCH_SIZE
+from onyx.configs.constants import DocumentSource
+from onyx.connectors.highspot.client import HighspotClient
+from onyx.connectors.highspot.client import HighspotClientError
+from onyx.connectors.highspot.utils import scrape_url_content
+from onyx.connectors.interfaces import GenerateDocumentsOutput
+from onyx.connectors.interfaces import GenerateSlimDocumentOutput
+from onyx.connectors.interfaces import LoadConnector
+from onyx.connectors.interfaces import PollConnector
+from onyx.connectors.interfaces import SecondsSinceUnixEpoch
+from onyx.connectors.interfaces import SlimConnector
+from onyx.connectors.models import ConnectorMissingCredentialError
+from onyx.connectors.models import Document
+from onyx.connectors.models import Section
+from onyx.connectors.models import SlimDocument
+from onyx.file_processing.extract_file_text import extract_file_text
+from onyx.file_processing.extract_file_text import VALID_FILE_EXTENSIONS
+from onyx.indexing.indexing_heartbeat import IndexingHeartbeatInterface
+from onyx.utils.logger import setup_logger
+
+logger = setup_logger()
+_SLIM_BATCH_SIZE = 1000
+
+
+class HighspotConnector(LoadConnector, PollConnector, SlimConnector):
+ """
+ Connector for loading data from Highspot.
+
+ Retrieves content from specified spots using the Highspot API.
+ If no spots are specified, retrieves content from all available spots.
+ """
+
+ def __init__(
+ self,
+ spot_names: List[str] = [],
+ batch_size: int = INDEX_BATCH_SIZE,
+ ):
+ """
+ Initialize the Highspot connector.
+
+ Args:
+ spot_names: List of spot names to retrieve content from (if empty, gets all spots)
+ batch_size: Number of items to retrieve in each batch
+ """
+ self.spot_names = spot_names
+ self.batch_size = batch_size
+ self._client: Optional[HighspotClient] = None
+ self._spot_id_map: Dict[str, str] = {} # Maps spot names to spot IDs
+ self._all_spots_fetched = False
+ self.highspot_url: Optional[str] = None
+ self.key: Optional[str] = None
+ self.secret: Optional[str] = None
+
+ @property
+ def client(self) -> HighspotClient:
+ if self._client is None:
+ if not self.key or not self.secret:
+ raise ConnectorMissingCredentialError("Highspot")
+ # Ensure highspot_url is a string, use default if None
+ base_url = (
+ self.highspot_url
+ if self.highspot_url is not None
+ else HighspotClient.BASE_URL
+ )
+ self._client = HighspotClient(self.key, self.secret, base_url=base_url)
+ return self._client
+
+ def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
+ logger.info("Loading Highspot credentials")
+ self.highspot_url = credentials.get("highspot_url")
+ self.key = credentials.get("highspot_key")
+ self.secret = credentials.get("highspot_secret")
+ return None
+
+ def _populate_spot_id_map(self) -> None:
+ """
+ Populate the spot ID map with all available spots.
+ Keys are stored as lowercase for case-insensitive lookups.
+ """
+ spots = self.client.get_spots()
+ for spot in spots:
+ if "title" in spot and "id" in spot:
+ spot_name = spot["title"]
+ self._spot_id_map[spot_name.lower()] = spot["id"]
+
+ self._all_spots_fetched = True
+ logger.info(f"Retrieved {len(self._spot_id_map)} spots from Highspot")
+
+ def _get_all_spot_names(self) -> List[str]:
+ """
+ Retrieve all available spot names.
+
+ Returns:
+ List of all spot names
+ """
+ if not self._all_spots_fetched:
+ self._populate_spot_id_map()
+
+ return [spot_name for spot_name in self._spot_id_map.keys()]
+
+ def _get_spot_id_from_name(self, spot_name: str) -> str:
+ """
+ Get spot ID from a spot name.
+
+ Args:
+ spot_name: Name of the spot
+
+ Returns:
+ ID of the spot
+
+ Raises:
+ ValueError: If spot name is not found
+ """
+ if not self._all_spots_fetched:
+ self._populate_spot_id_map()
+
+ spot_name_lower = spot_name.lower()
+ if spot_name_lower not in self._spot_id_map:
+ raise ValueError(f"Spot '{spot_name}' not found")
+
+ return self._spot_id_map[spot_name_lower]
+
+ def load_from_state(self) -> GenerateDocumentsOutput:
+ """
+ Load content from configured spots in Highspot.
+ If no spots are configured, loads from all spots.
+
+ Yields:
+ Batches of Document objects
+ """
+ return self.poll_source(None, None)
+
+ def poll_source(
+ self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None
+ ) -> GenerateDocumentsOutput:
+ """
+ Poll Highspot for content updated since the start time.
+
+ Args:
+ start: Start time as seconds since Unix epoch
+ end: End time as seconds since Unix epoch
+
+ Yields:
+ Batches of Document objects
+ """
+ doc_batch: list[Document] = []
+
+ # If no spots specified, get all spots
+ spot_names_to_process = self.spot_names
+ if not spot_names_to_process:
+ spot_names_to_process = self._get_all_spot_names()
+ logger.info(
+ f"No spots specified, using all {len(spot_names_to_process)} available spots"
+ )
+
+ for spot_name in spot_names_to_process:
+ try:
+ spot_id = self._get_spot_id_from_name(spot_name)
+ if spot_id is None:
+ logger.warning(f"Spot ID not found for spot {spot_name}")
+ continue
+ offset = 0
+ has_more = True
+
+ while has_more:
+ logger.info(
+ f"Retrieving items from spot {spot_name}, offset {offset}"
+ )
+ response = self.client.get_spot_items(
+ spot_id=spot_id, offset=offset, page_size=self.batch_size
+ )
+ items = response.get("collection", [])
+ logger.info(f"Received Items: {items}")
+ if not items:
+ has_more = False
+ continue
+
+ for item in items:
+ try:
+ item_id = item.get("id")
+ if not item_id:
+ logger.warning("Item without ID found, skipping")
+ continue
+
+ item_details = self.client.get_item(item_id)
+ if not item_details:
+ logger.warning(
+ f"Item {item_id} details not found, skipping"
+ )
+ continue
+ # Apply time filter if specified
+ if start or end:
+ updated_at = item_details.get("date_updated")
+ if updated_at:
+ # Convert to datetime for comparison
+ try:
+ updated_time = datetime.fromisoformat(
+ updated_at.replace("Z", "+00:00")
+ )
+ if (
+ start and updated_time.timestamp() < start
+ ) or (end and updated_time.timestamp() > end):
+ continue
+ except (ValueError, TypeError):
+ # Skip if date cannot be parsed
+ logger.warning(
+ f"Invalid date format for item {item_id}: {updated_at}"
+ )
+ continue
+
+ content = self._get_item_content(item_details)
+ title = item_details.get("title", "")
+
+ doc_batch.append(
+ Document(
+ id=f"HIGHSPOT_{item_id}",
+ sections=[
+ Section(
+ link=item_details.get(
+ "url",
+ f"https://www.highspot.com/items/{item_id}",
+ ),
+ text=content,
+ )
+ ],
+ source=DocumentSource.HIGHSPOT,
+ semantic_identifier=title,
+ metadata={
+ "spot_name": spot_name,
+ "type": item_details.get("content_type", ""),
+ "created_at": item_details.get(
+ "date_added", ""
+ ),
+ "author": item_details.get("author", ""),
+ "language": item_details.get("language", ""),
+ "can_download": str(
+ item_details.get("can_download", False)
+ ),
+ },
+ doc_updated_at=item_details.get("date_updated"),
+ )
+ )
+
+ if len(doc_batch) >= self.batch_size:
+ yield doc_batch
+ doc_batch = []
+
+ except HighspotClientError as e:
+ item_id = "ID" if not item_id else item_id
+ logger.error(f"Error retrieving item {item_id}: {str(e)}")
+
+ has_more = len(items) >= self.batch_size
+ offset += self.batch_size
greptile
logic: Potential infinite loop if batch_size is 0 or negative. Add validation in __init__ to ensure batch_size > 0
suggested fix
+ has_more = len(items) >= self.batch_size and self.batch_size > 0
offset += self.batch_size
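If the validation were added in __init__ as the comment suggests, it might look like this sketch (illustrative only, reusing the types and INDEX_BATCH_SIZE import from the diff above; not the connector's actual code):

```python
# In HighspotConnector:
def __init__(
    self,
    spot_names: List[str] = [],
    batch_size: int = INDEX_BATCH_SIZE,
):
    # Guard against a zero or negative batch size, which would otherwise let
    # `has_more = len(items) >= self.batch_size` stay true forever.
    if batch_size <= 0:
        raise ValueError(f"batch_size must be a positive integer, got {batch_size}")
    self.spot_names = spot_names
    self.batch_size = batch_size
    # ... remaining attribute initialization as in the diff above
```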
diff block
branch_labels = None
depends_on = None
+logger = logging.getLogger("alembic.runtime.migration")
+
def upgrade() -> None:
- # Get database connection
+ """Conflicts on lowercasing will result in the uppercased email getting a
+ unique integer suffix when converted to lowercase."""
+
connection = op.get_bind()
- # Update all user emails to lowercase
- connection.execute(
- text(
- """
- UPDATE "user"
- SET email = LOWER(email)
- WHERE email != LOWER(email)
- """
- )
- )
+ # Fetch all user emails that are not already lowercase
+ user_emails = connection.execute(
+ text('SELECT id, email FROM "user" WHERE email != LOWER(email)')
+ ).fetchall()
+
+ for user_id, email in user_emails:
+ email = cast(str, email)
+ username, domain = email.rsplit("@", 1)
+ new_email = f"{username.lower()}@{domain.lower()}"
+ attempt = 1
+
+ while True:
greptile
logic: Infinite loop possible if unique email cannot be generated (e.g., if attempt counter overflows)
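A bounded version of that retry loop might look like the following sketch; the cap, the collision-check query, and the suffixing scheme are assumptions about what the truncated loop body does, not the migration's actual code:

```python
MAX_ATTEMPTS = 1000  # assumed cap; the original loop is unbounded

new_email = f"{username.lower()}@{domain.lower()}"
for attempt in range(1, MAX_ATTEMPTS + 1):
    # Assumed collision check against the existing user table.
    collision = connection.execute(
        text('SELECT 1 FROM "user" WHERE email = :email'),
        {"email": new_email},
    ).first()
    if collision is None:
        break
    # Conflict: append an integer suffix to the local part and retry.
    new_email = f"{username.lower()}{attempt}@{domain.lower()}"
else:
    raise RuntimeError(f"Could not generate a unique lowercase email for user {user_id}")
```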
diff block
)
return None
- def load_from_state(self) -> GenerateDocumentsOutput:
- return self.poll_source(None, None)
-
- def poll_source(
- self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None
- ) -> GenerateDocumentsOutput:
+ @override
+ def load_from_checkpoint(
+ self,
+ start: SecondsSinceUnixEpoch,
+ end: SecondsSinceUnixEpoch,
+ checkpoint: ZendeskConnectorCheckpoint,
+ ) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
self.content_tags = _get_content_tag_mapping(self.client)
if self.content_type == "articles":
- yield from self._poll_articles(start)
+ return self._retrieve_articles(start, end, checkpoint)
elif self.content_type == "tickets":
- yield from self._poll_tickets(start)
+ return self._retrieve_tickets(start, end, checkpoint)
else:
raise ValueError(f"Unsupported content_type: {self.content_type}")
- def _poll_articles(
- self, start: SecondsSinceUnixEpoch | None
- ) -> GenerateDocumentsOutput:
- articles = _get_articles(self.client, start_time=int(start) if start else None)
-
+ def _retrieve_articles(
+ self,
+ start: SecondsSinceUnixEpoch | None,
+ end: SecondsSinceUnixEpoch | None,
+ checkpoint: ZendeskConnectorCheckpoint,
+ ) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
+ checkpoint = copy.deepcopy(checkpoint)
 # This one is built on the fly as there may be many more authors than tags
- author_map: dict[str, BasicExpertInfo] = {}
-
- doc_batch = []
- for article in articles:
- if (
- article.get("body") is None
- or article.get("draft")
- or any(
- label in ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS
- for label in article.get("label_names", [])
- )
- ):
- continue
-
- new_author_map, documents = _article_to_document(
- article, self.content_tags, author_map, self.client
+ author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
+ after_cursor = checkpoint.after_cursor
+ doc_batch: list[Document] = []
+
+ # ensure at least one document is retrieved (connector makes progress)
+ while len(doc_batch) == 0:
+ response = _get_article_page(
greptile
logic: potential infinite loop if all articles are filtered out and has_more is true
suggested fix
# ensure at least one document is retrieved (connector makes progress)
+ max_attempts = 100 # Prevent infinite loops
+ attempts = 0
+ while len(doc_batch) == 0 and attempts < max_attempts:
+ attempts += 1
response = _get_article_page(
diff block
+import { CoreObjectNameSingular } from '@/object-metadata/types/CoreObjectNameSingular';
+import { useFindManyRecords } from '@/object-record/hooks/useFindManyRecords';
+import { FormMultiSelectFieldInput } from '@/object-record/record-field/form-types/components/FormMultiSelectFieldInput';
+import { Agent } from '@/settings/service-center/agents/types/Agent';
+import { useFindAllInboxes } from '@/settings/service-center/inboxes/hooks/useFindAllInboxes';
+import { useFindAllSectors } from '@/settings/service-center/sectors/hooks/useFindAllSectors';
+import { IntegrationType } from '@/settings/service-center/types/IntegrationType';
+import { Select } from '@/ui/input/components/Select';
+import { WorkspaceMember } from '@/workspace-member/types/WorkspaceMember';
+import styled from '@emotion/styled';
+import { useEffect } from 'react';
+import { Controller, useFormContext } from 'react-hook-form';
+import { H2Title, Section, Toggle, useIcons } from 'twenty-ui';
+import { z } from 'zod';
+
+const agentMetadataFormSchema = z.object({
+ id: z.string(),
+ isAdmin: z.boolean(),
+ memberId: z.string().min(1, 'Member ID is required'),
+ sectorIds: z.array(z.string()).nonempty('At least one sector ID is required'),
+ inboxesIds: z.array(z.string()).nonempty('At least one inbox ID is required'),
+ workspaceId: z.string().min(1, 'Workspace ID is required'),
+});
+
+export const SettingsServiceCenterAgentFormSchema =
+ agentMetadataFormSchema.pick({
+ isAdmin: true,
+ memberId: true,
+ sectorIds: true,
+ inboxesIds: true,
+ workspaceId: true,
+ });
+
+export type SettingsServiceCenterAgentFormSchemaValues = z.infer<
+ typeof agentMetadataFormSchema
+>;
+
+type SettingsServiceCenterAgentAboutFormProps = {
+ disabled?: boolean;
+ disableNameEdit?: boolean;
+ activeAgent?: Agent;
+};
+
+const StyledSection = styled(Section)`
+ display: flex;
+ gap: 4px;
+`;
+
+export const SettingsServiceCenterAgentAboutForm = ({
+ disabled,
+ activeAgent,
+}: SettingsServiceCenterAgentAboutFormProps) => {
+ const { control, reset } =
+ useFormContext<SettingsServiceCenterAgentFormSchemaValues>();
+ // const { t } = useTranslation();
+ const { getIcon } = useIcons();
+
+ const { records: workspaceMembers } = useFindManyRecords<WorkspaceMember>({
+ objectNameSingular: CoreObjectNameSingular.WorkspaceMember,
+ });
+
+ const { sectors, refetch: refetchSectors } = useFindAllSectors();
+ const { inboxes, refetch: refecthInboxes } = useFindAllInboxes();
+
+ const Icon = getIcon('IconIdBadge2');
+
+ const memberOptions = workspaceMembers
+ ?.filter(
+ (member) => member.agentId === '' || member.agentId === activeAgent?.id,
+ )
+ .map((workspaceMember) => {
+ const label =
+ workspaceMember.name.firstName.trim() !== '' ||
+ workspaceMember.name.lastName.trim() !== ''
+ ? `${workspaceMember.name.firstName} ${workspaceMember.name.lastName}`
+ : 'Name not provided';
+
+ return {
+ label: label,
+ value: workspaceMember.id,
+ avatarUrl: workspaceMember.avatarUrl,
+ };
+ });
+
+ const sectorsOptions =
+ sectors?.map((sector) => ({
+ Icon: getIcon(sector.icon),
+ label: sector.name,
+ value: sector.id,
+ })) ?? [];
+
+ const inboxesOptions =
+ inboxes?.map((inbox) => {
+ const inboxIcon =
+ inbox.integrationType.toLowerCase() === IntegrationType.WHATSAPP
+ ? 'IconBrandWhatsapp'
+ : null; // 'IconBrandMessenger'
+ const inboxIntegration =
+ inbox.integrationType.toLowerCase() === IntegrationType.WHATSAPP
+ ? inbox.whatsappIntegration
+ : null; // inbox.messengerIntegration
+ return {
+ Icon: getIcon(inboxIcon),
+ label: inboxIntegration?.label ?? '',
+ value: inbox.id,
+ };
+ }) ?? [];
+
+ useEffect(() => {
+ // eslint-disable-next-line @nx/workspace-explicit-boolean-predicates-in-if
+ if (activeAgent) {
+ reset({
+ id: activeAgent.id,
+ isAdmin: activeAgent.isAdmin ?? false,
+ memberId: activeAgent.memberId,
+ sectorIds: activeAgent.sectors?.map((sector) => sector.id) ?? [],
+ inboxesIds: activeAgent.inboxes?.map((inbox) => inbox.id) ?? [],
+ workspaceId: activeAgent.workspace.id ?? '',
+ });
+ }
+ }, [activeAgent, reset]);
+
+ useEffect(() => {
+ refetchSectors();
+ refecthInboxes();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
greptile
logic: empty dependency array in useEffect but using refetchSectors and refetchInboxes - could cause infinite loop if these functions change
diff block
)
return None
- def load_from_state(self) -> GenerateDocumentsOutput:
- return self.poll_source(None, None)
-
- def poll_source(
- self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None
- ) -> GenerateDocumentsOutput:
+ @override
+ def load_from_checkpoint(
+ self,
+ start: SecondsSinceUnixEpoch,
+ end: SecondsSinceUnixEpoch,
+ checkpoint: ZendeskConnectorCheckpoint,
+ ) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
self.content_tags = _get_content_tag_mapping(self.client)
if self.content_type == "articles":
- yield from self._poll_articles(start)
+ return self._retrieve_articles(start, end, checkpoint)
elif self.content_type == "tickets":
- yield from self._poll_tickets(start)
+ return self._retrieve_tickets(start, end, checkpoint)
else:
raise ValueError(f"Unsupported content_type: {self.content_type}")
- def _poll_articles(
- self, start: SecondsSinceUnixEpoch | None
- ) -> GenerateDocumentsOutput:
- articles = _get_articles(self.client, start_time=int(start) if start else None)
-
+ def _retrieve_articles(
+ self,
+ start: SecondsSinceUnixEpoch | None,
+ end: SecondsSinceUnixEpoch | None,
+ checkpoint: ZendeskConnectorCheckpoint,
+ ) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
+ checkpoint = copy.deepcopy(checkpoint)
 # This one is built on the fly as there may be many more authors than tags
- author_map: dict[str, BasicExpertInfo] = {}
-
- doc_batch = []
- for article in articles:
- if (
- article.get("body") is None
- or article.get("draft")
- or any(
- label in ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS
- for label in article.get("label_names", [])
- )
- ):
- continue
-
- new_author_map, documents = _article_to_document(
- article, self.content_tags, author_map, self.client
+ author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
+ after_cursor = checkpoint.after_cursor
+ doc_batch: list[Document] = []
+
+ # ensure at least one document is retrieved (connector makes progress)
+ while len(doc_batch) == 0:
+ response = _get_article_page(
+ self.client,
+ start_time=int(start) if start else None,
+ after_cursor=after_cursor,
)
- if new_author_map:
- author_map.update(new_author_map)
-
- doc_batch.append(documents)
- if len(doc_batch) >= self.batch_size:
- yield doc_batch
- doc_batch.clear()
+ articles = response.data
+ has_more = response.has_more
+ after_cursor = response.meta.get("after_cursor")
+ for article in articles:
+ if (
+ article.get("body") is None
+ or article.get("draft")
+ or any(
+ label in ZENDESK_CONNECTOR_SKIP_ARTICLE_LABELS
+ for label in article.get("label_names", [])
+ )
+ ):
+ continue
- if doc_batch:
- yield doc_batch
+ try:
+ new_author_map, documents = _article_to_document(
+ article, self.content_tags, author_map, self.client
+ )
+ except Exception as e:
+ yield ConnectorFailure(
+ failed_document=DocumentFailure(
+ document_id=f"{article.get('id')}",
+ document_link=article.get("html_url", ""),
+ ),
+ failure_message=str(e),
+ exception=e,
+ )
+ continue
+
+ if new_author_map:
+ author_map.update(new_author_map)
+
+ doc_batch.append(documents)
+
+ if not has_more:
+ yield from doc_batch
+ checkpoint.has_more = False
+ return checkpoint
+
+ # At least one document was retrieved, but generally its a full page
+ # of documents.
+ yield from doc_batch
+ checkpoint.after_cursor = after_cursor
+ checkpoint.has_more = bool(
+ end is None
+ or doc_batch[-1].doc_updated_at is None
+ or doc_batch[-1].doc_updated_at.timestamp() <= end
+ )
+ checkpoint.cached_author_map = (
+ author_map if len(author_map) <= MAX_AUTHOR_MAP_SIZE else None
+ )
+ return checkpoint
- def _poll_tickets(
- self, start: SecondsSinceUnixEpoch | None
- ) -> GenerateDocumentsOutput:
+ def _retrieve_tickets(
+ self,
+ start: SecondsSinceUnixEpoch | None,
+ end: SecondsSinceUnixEpoch | None,
+ checkpoint: ZendeskConnectorCheckpoint,
+ ) -> CheckpointOutput[ZendeskConnectorCheckpoint]:
+ checkpoint = copy.deepcopy(checkpoint)
if self.client is None:
raise ZendeskCredentialsNotSetUpError()
- author_map: dict[str, BasicExpertInfo] = {}
+ author_map: dict[str, BasicExpertInfo] = checkpoint.cached_author_map or {}
- ticket_generator = _get_tickets(
- self.client, start_time=int(start) if start else None
- )
+ doc_batch: list[Document] = []
+ next_start_time = int(checkpoint.next_start_time or start or 0)
+ while len(doc_batch) == 0:
+ ticket_response = _get_tickets_page(self.client, start_time=next_start_time)
+ tickets = ticket_response.data
greptile
logic: potential infinite loop if all tickets are filtered out and has_more is true
suggested fix
+ max_attempts = 100 # Prevent infinite loops
+ attempts = 0
+ while len(doc_batch) == 0 and attempts < max_attempts:
+ attempts += 1
ticket_response = _get_tickets_page(self.client, start_time=next_start_time)
tickets = ticket_response.data
diff block
import Link from 'next/link';
-import imgActorNetworking from '@/images/actors/networking.jpg';
# Networking
-Actor networking is configured with two properties:
+Rivet provides a simple yet powerful networking system for your actors. When you create an actor, you define the ports it will use, and Rivet handles the networking infrastructure, including URL generation, port mapping, and traffic routing.
-1. Network mode (Bridge or Host)
-2. Port routing (Rivet Guard or Host)
+## Configuring Networking
-## Network Modes
+When creating an actor, you specify the ports that your application needs to expose. This is done through the `network.ports` parameter when creating your actor with the [actor.create](/docs/api/actors/create) API:
-### Bridge
+```javascript
+// Simple example defining an HTTP port
+const actor = await rivet.actors.create({
+ // ...other configuration
+ network: {
+ ports: {
+ http: { protocol: "https" }
+ }
+ }
+});
+```
-Bridge networking maps a random port on the host to the chosen port number for
-your actor. Ports can only be access over the LAN. All inbound traffic to actors
-using bridge networking must be routed through Rivet Guard (see below).
+Once your actor is running with properly configured ports, you can access it through the URLs provided by Rivet. These URLs are available:
-Actors in bridge networking have isolated network namespaces and cannot
-interfere with other actor's networking.
+- Via the API in the actor creation response under `actor.network.ports.http.url`
+- In the Rivet Hub dashboard for your actor
-For containers, Rivet uses <Link href='https://www.cni.dev/'>CNI</Link> on our
-actor servers to provide bridged port access to any ports chosen by the user. It
-maps a randomly chosen port number on the host machine to the chosen port number
-by you.
+## Listening For Requests On The Actor
-Bridge networking is currently not implemented for isolates.
+Once your actor is running, Rivet injects environment variables containing information about the assigned ports. Your application should use these variables to determine what port to listen on.
-### Host
+For example:
-_Requires enterprise or self-hosted cluster._
+```javascript
+// Simple example starting an HTTP server
+const express = require('express');
+const app = express();
+const port = process.env.PORT_HTTP;
-Host networking allows direct access to host ports on our actor servers with the drawback that you cannot
-choose a specific port number. Port numbers will be randomly chosen for you for each port. To read the
-port number in your application, there are environment variables provided with the port name and number
-(ex. `PORT_MY_PORT=24020`).
+app.get('/', (req, res) => {
+ res.send('Hello World!');
+});
-Containers with host networking have no restriction on what ports they can listen on
-and connect to. This is less secure than using bridge networking.
+app.listen(port, () => {
+ console.log(`Server listening on port ${port}`);
+});
+```
-For isolates, Rivet restricts what ports an isolate can listen on.
+### Environment Variable Naming
-Host networking is sometimes required for edge cases where actors need an
-unrestricted amount of ports. It's up to the developer to ensure that multiple
-actors on the same machine do not interfere with each others' networking.
+For each port defined in your actor configuration under `network.ports`, Rivet sets a single environment variable `PORT_{NAME}` where `{NAME}` is the uppercase version of the key you used in the `network.ports` object.
-## Port Routing
+Your application should read these environment variables to know which ports to bind to.
-### Rivet Guard (RG)
+For example:
-Rivet Guard provides DDoS mitigation, automatic SSL management, monitoring, and
-more out of the box with negligible added latency. Use Rivet Guard for your
-application whenever possible for production applications.
+- `network.ports.http` becomes the `PORT_HTTP` environment variable
+- `network.port.game` becomes the `PORT_GAME` environment variable
+- `network.port["hello-world"]` becomes the `PORT_HELLO_WORLD` environment variable
-Rivet Guard is a system of servers that proxies end-user connections through itself before connecting to the
-actor servers running your application over LAN. This provides DDoS protection with little to no added latency.
+## Examples
-Rivet Guard ports cannot be accessed from requests made directly to the actor server over WAN, only through a Rivet
-Guard server over LAN.
+Here are complete examples demonstrating how to set up networking for both the client and server.
-Rivet Guard routing can be used with either host or bridge network modes.
+### HTTP Server Example
-### Host
+#### Client (Creating the Actor)
-_Requires enterprise or self-hosted cluster._
+```javascript
+// Creating an actor with an HTTP port
+const actor = await rivet.actors.create({
+ tags: { name: "my-http-server" },
+ buildTags: { name: "my-http-server", current: "true" },
+ network: {
+ ports: {
+ // "http" is the port name (becomes PORT_HTTP env var)
+ http: { protocol: "https" }
+ }
+ },
+});
-Host ports do not provide any proxying between the end-user and your application and **is not enabled on
-production Rivet servers**. This is primarily an option for <Link href='/docs/self-hosting'>self hosting</Link> Rivet.
+console.log(actor.network.ports.http); // Information about HTTP port
+console.log(actor.network.ports.http.url); // URL to connect to
+```
-It recommended to use Rivet Guard whenever possible for production applications
-to better protect from DDoS attacks & improve monitoring of your application.
+#### Actor (Starting the Server)
-If using host ports, read about differences in behavior with [actor rescheduling](/docs/rescheduling).
+```javascript
+const express = require('express');
+const app = express();
-## Endpoint Type
+// Get the port from the environment variable
+const port = process.env.PORT_HTTP;
-### Host
+app.get('/', (req, res) => {
+ res.send('Hello from Rivet!');
+});
-<Note>"Host endpoint type" has no association with "host networking" or "host routing" above.</Note>
+app.listen(port, () => {
+ console.log(`Server running on port ${port}`);
+});
+```
-The host endpoint type uses the `Host` header to route the request to the correct actor. This is the default & recommended method to use for production Rivet clusters.
+### TCP Game Server Example
-For example, the URL `c03fc1cf-66dc-4946-9663-eae42a399060-http.actor.45a33878-e0e5-4f6b-8e72-7e9f6c0a306c.rivet.run` would route to the:
+#### Client (Creating the Actor)
-- Actor with ID `c03fc1cf-66dc-4946-9663-eae42a399060`
-- Port labelled `http`
-- Region with ID `45a33878-e0e5-4f6b-8e72-7e9f6c0a306c`
+```javascript
+// Creating an actor with a TCP port for a game server
+const actor = await rivet.actors.create({
+ tags: { name: "my-game-server" },
+ buildTags: { name: "my-game-server", current: "true" },
+ network: {
+ ports: {
+ // "game" is the port name (becomes PORT_GAME env var)
+ game: { protocol: "tcp" }
+ }
+ },
+});
-If self-hosting, host endpoints requires [wildcard DNS records](https://en.wikipedia.org/wiki/Wildcard_DNS_record#:~:text=A%20wildcard%20DNS%20record%20is,example.com%20.) & [wildcard SSL certificates](https://letsencrypt.org/docs/faq/#does-let-s-encrypt-issue-wildcard-certificates).
+let port = actor.network.ports.game;
+console.log(`${port.hostname}:${port.port}`); // Connection address for port
+```
-### Path
+#### Server (Starting the Server)
-The path endpoint type uses the beginning of the request path to route to the correct actor. This should only be used if:
+```javascript
+const net = require('net');
-- Self hosting or developing Rivet without a wildcard DNS record (e.g. Rivet Guard running on `127.0.0.1:7080`)
-- Rare cases where explicit hostnames need to be whitelisted (e.g. Discord Activities [URL mappings](https://discord.com/developers/docs/activities/development-guides#url-mapping))
+// Get the port from the environment variable
+// This corresponds to the "game" key in the network.ports object
+const port = process.env.PORT_GAME;
-<Warning>
- Directing your users to an HTML page with path endpoint type may be a security
- risk. The origin for path endpoint types (`route.actor.{region}.rivet.run`) is
- shared with other actors. This means that all cookies, local/session storage,
- web workers, etc are shared with any other actor running in the same region.
+const server = net.createServer((socket) => {
+ socket.write('Hello from Rivet Game Server!\r\n');
+ socket.pipe(socket);
+});
greptile
logic: socket.pipe(socket) creates an echo server that may cause infinite loops - consider removing or documenting the echo behavior
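A sketch (not from the PR) of how the echo behavior could be bounded rather than piping the socket back into itself; the byte cap is an arbitrary assumed value:

```typescript
// Sketch only: reply once and cap how many bytes are echoed back, so a client
// that replays its own input cannot keep the exchange going indefinitely.
import * as net from "net";

const port = Number(process.env.PORT_GAME ?? 0);
const MAX_ECHO_BYTES = 64 * 1024; // assumed limit, not part of the PR

const server = net.createServer((socket) => {
  socket.write("Hello from Rivet Game Server!\r\n");

  let echoed = 0;
  socket.on("data", (chunk) => {
    echoed += chunk.length;
    if (echoed > MAX_ECHO_BYTES) {
      socket.end(); // stop echoing once the limit is reached
      return;
    }
    socket.write(chunk);
  });
});

server.listen(port, () => console.log(`Game server listening on ${port}`));
```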
diff block
+import type OpenAI from 'openai';
+import type {
+ __Autogenerated__AppId,
+ __Autogenerated__PromptIdV2,
+ __Autogenerated__PromptMajorVersionV2,
+ __Autogenerated__PromptMinorVersionV2,
+ __Autogenerated__PromptParamsV2,
+ __Autogenerated__PromptTemplateIdV2,
+ __Autogenerated__PromptTemplateParamsV2,
+ __Autogenerated__PromptToolNameV2,
+ __Autogenerated__PromptToolParamsV2,
+} from './autogenerated-v2';
+import {
+ zPromptSchema,
+ type TimeDelta,
+ type PromptTracking,
+ type Prompt,
+} from '../types';
+import {
+ readEnv,
+ AutoblocksEnvVar,
+ convertTimeDeltaToMilliSeconds,
+ RevisionSpecialVersionsEnum,
+ AUTOBLOCKS_HEADERS,
+ REVISION_UNDEPLOYED_VERSION,
+ V2_API_ENDPOINT,
+} from '../util';
+import { renderTemplateWithParams, renderToolWithParams } from './util';
+import { testCaseRunAsyncLocalStorage } from '../asyncLocalStorage';
+
+/**
+ * Note that we check for the presence of the CLI environment
+ * variable and not the test case local storage because the
+ * local storage vars aren't set until runTestSuite is called,
+ * whereas a prompt manager might have already been imported
+ * and initialized by the time runTestSuite is called.
+ */
+const isTestingContext = (): boolean => {
+ return readEnv(AutoblocksEnvVar.AUTOBLOCKS_CLI_SERVER_ADDRESS) !== undefined;
+};
+
+/**
+ * The AUTOBLOCKS_OVERRIDES_PROMPT_REVISIONS environment variable is a JSON-stringified
+ * map of prompt IDs to revision IDs. This is set in CI test runs triggered
+ * from the UI.
+ */
+const promptRevisionsMap = (): Record<string, string> => {
+ if (!isTestingContext()) {
+ return {};
+ }
+
+ const promptRevisionsRaw = readEnv(
+ AutoblocksEnvVar.AUTOBLOCKS_OVERRIDES_PROMPT_REVISIONS,
+ );
+ if (!promptRevisionsRaw) {
+ return {};
+ }
+
+ return JSON.parse(promptRevisionsRaw);
+};
+
+export class AutoblocksPromptManagerV2<
+ AppId extends __Autogenerated__AppId,
+ PromptId extends __Autogenerated__PromptIdV2<AppId>,
+ MajorVersion extends __Autogenerated__PromptMajorVersionV2<AppId, PromptId>,
+ MinorVersion extends __Autogenerated__PromptMinorVersionV2<
+ AppId,
+ PromptId,
+ MajorVersion
+ >,
+> {
+ private readonly appId: AppId;
+ private readonly id: PromptId;
+ private readonly majorVersion: string;
+ private readonly minorVersion: string | { version: string; weight: number }[];
+ private readonly minorVersionsToRequest: string[];
+
+ private readonly apiKey: string;
+
+ // Map of minor version -> prompt
+ private prompts: Record<string, Prompt> = {};
+
+ // Used in a testing context to override the prompt with
+ // a revision if AUTOBLOCKS_PROMPT_REVISIONS is set for this
+ // prompt ID.
+ private promptRevisionOverride: Prompt | undefined = undefined;
+
+ private readonly refreshIntervalTimer: NodeJS.Timer | undefined;
+ private readonly refreshTimeoutMs: number;
+ private readonly initTimeoutMs: number;
+
+ constructor(args: {
+ appId: AppId;
+ id: PromptId;
+ version: {
+ major: MajorVersion;
+ minor:
+ | MinorVersion
+ | [
+ { version: MinorVersion; weight: number },
+ ...{ version: MinorVersion; weight: number }[],
+ ];
+ };
+ apiKey?: string;
+ refreshInterval?: TimeDelta;
+ refreshTimeout?: TimeDelta;
+ initTimeout?: TimeDelta;
+ }) {
+ this.appId = args.appId;
+ this.id = args.id;
+ this.majorVersion = args.version.major;
+ this.minorVersion = args.version.minor;
+ this.minorVersionsToRequest = makeMinorVersionsToRequest({
+ minorVersion: this.minorVersion,
+ });
+
+ const apiKey =
+ args.apiKey || readEnv(AutoblocksEnvVar.AUTOBLOCKS_V2_API_KEY);
+ if (!apiKey) {
+ throw new Error(
+ `You must either pass in the API key via 'apiKey' or set the '${AutoblocksEnvVar.AUTOBLOCKS_V2_API_KEY}' environment variable.`,
+ );
+ }
+ this.apiKey = apiKey;
+
+ this.refreshTimeoutMs = convertTimeDeltaToMilliSeconds(
+ args.refreshTimeout || { seconds: 30 },
+ );
+ this.initTimeoutMs = convertTimeDeltaToMilliSeconds(
+ args.initTimeout || { seconds: 30 },
+ );
+
+ if (
+ this.minorVersionsToRequest.includes(RevisionSpecialVersionsEnum.LATEST)
+ ) {
+ if (isTestingContext()) {
+ this.logger.info(
+ 'Prompt refreshing is disabled when in a testing context.',
+ );
+ return;
+ }
+
+ const refreshInterval = args.refreshInterval || { seconds: 10 };
+ const refreshIntervalMs = convertTimeDeltaToMilliSeconds(refreshInterval);
+ if (refreshIntervalMs < 1000) {
+ throw new Error(
+ `Refresh interval can't be shorter than 1 second (got ${refreshIntervalMs}ms)`,
+ );
+ }
+ this.logger.info(
+ `Refreshing latest prompt every ${Math.round(
+ refreshIntervalMs / 1000,
+ )} seconds`,
+ );
+ this.refreshIntervalTimer = setInterval(
+ this.refreshLatest.bind(this),
+ refreshIntervalMs,
+ );
+ }
+ }
+
+ private get logger() {
+ const prefix = `[${this.appId}/${this.id}@v${this.majorVersion}]`;
+ return {
+ info: (message: string) => console.info(`${prefix} ${message}`),
+ warn: (message: string) => console.warn(`${prefix} ${message}`),
+ error: (message: string) => console.error(`${prefix} ${message}`),
+ };
+ }
+
+ private makeRequestUrl(args: { minorVersion: string }): string {
+ const appId = encodeURIComponent(this.appId);
+ const promptId = encodeURIComponent(this.id);
+
+ let majorVersion: string;
+ let minorVersion: string = args.minorVersion;
+
+ if (
+ this.majorVersion ===
+ RevisionSpecialVersionsEnum.DANGEROUSLY_USE_UNDEPLOYED
+ ) {
+ majorVersion = REVISION_UNDEPLOYED_VERSION;
+ } else {
+ majorVersion = this.majorVersion;
+ }
+
+ majorVersion = encodeURIComponent(majorVersion);
+ minorVersion = encodeURIComponent(minorVersion);
+
+ return `${V2_API_ENDPOINT}/apps/${appId}/prompts/${promptId}/major/${majorVersion}/minor/${minorVersion}`;
+ }
+
+ private makeRevisionValidateOverrideRequestUrl(args: {
+ revisionId: string;
+ }): string {
+ const appId = encodeURIComponent(this.appId);
+ const promptId = encodeURIComponent(this.id);
+ const revisionId = encodeURIComponent(args.revisionId);
+
+ return `${V2_API_ENDPOINT}/apps/${appId}/prompts/${promptId}/revisions/${revisionId}/validate`;
+ }
+
+ private async getPrompt(args: {
+ minorVersion: string;
+ timeoutMs: number;
+ throwOnError: boolean;
+ }): Promise<Prompt | undefined> {
+ const url = this.makeRequestUrl({ minorVersion: args.minorVersion });
+
+ try {
+ const resp = await fetch(url, {
+ method: 'GET',
+ headers: {
+ ...AUTOBLOCKS_HEADERS,
+ Authorization: `Bearer ${this.apiKey}`,
+ },
+ signal: AbortSignal.timeout(args.timeoutMs),
+ });
+ const data = await resp.json();
+ return zPromptSchema.parse(data);
+ } catch (err) {
+ this.logger.error(
+ `Failed to fetch version v${this.majorVersion}.${args.minorVersion}: ${err}`,
+ );
+ if (args.throwOnError) {
+ throw err;
+ }
+ }
+
+ return undefined;
+ }
+
+ /**
+ * If this prompt has a revision override set, use the /validate endpoint to check if the
+ * major version this prompt manager is configured to use is compatible to be
+ * overridden with the revision.
+ */
+ private async setPromptRevisionOverride(args: {
+ revisionId: string;
+ }): Promise<void> {
+ // Double check we're in a testing context
+ if (!isTestingContext()) {
+ this.logger.error(
+ "Can't set prompt revision unless in a testing context.",
+ );
+ return;
+ }
+
+ // Double check the given revisionId belongs to this prompt manager
+ const expectedRevisionId = promptRevisionsMap()[this.id];
+ if (args.revisionId !== expectedRevisionId) {
+ throw new Error(
+ `Revision ID '${args.revisionId}' does not match the revision ID for this prompt manager '${expectedRevisionId}'.`,
+ );
+ }
+
+ if (
+ this.majorVersion ===
+ RevisionSpecialVersionsEnum.DANGEROUSLY_USE_UNDEPLOYED
+ ) {
+ throw new Error(
+ `Prompt revision overrides are not yet supported for prompt managers using 'dangerously-use-undeployed'.
+ Reach out to support@autoblocks.ai for more details.`,
+ );
+ }
+
+ const url = this.makeRevisionValidateOverrideRequestUrl({
+ revisionId: args.revisionId,
+ });
+ const resp = await fetch(url, {
+ method: 'POST',
+ headers: {
+ ...AUTOBLOCKS_HEADERS,
+ Authorization: `Bearer ${this.apiKey}`,
+ },
+ body: JSON.stringify({
+ majorVersion: parseInt(this.majorVersion, 10),
+ }),
+ signal: AbortSignal.timeout(this.initTimeoutMs),
+ });
+
+ if (resp.status === 409) {
+ // The /validate endpoint returns this status code when the revision is
+ // not compatible with the major version this prompt manager
+ // is configured to use.
+ throw new Error(
+ `Can't override prompt '${this.id}' with revision '${args.revisionId}' because it is not compatible with major version '${this.majorVersion}'.`,
+ );
+ }
+
+ const data = await resp.json();
+
+ // Throw for any unexpected errors
+ if (!resp.ok) {
+ throw new Error(`HTTP Error: ${JSON.stringify(data)}`);
+ }
+
+ this.logger.warn(
+ `Overriding prompt '${this.id}' with revision '${args.revisionId}'!`,
+ );
+ this.promptRevisionOverride = zPromptSchema.parse(data);
+ }
+
+ private async refreshLatest(): Promise<void> {
+ try {
+ // Get the latest minor version within this prompt's major version
+ const newLatest = await this.getPrompt({
+ minorVersion: RevisionSpecialVersionsEnum.LATEST,
+ timeoutMs: this.refreshTimeoutMs,
+ throwOnError: false,
+ });
+ if (!newLatest) {
+ this.logger.warn(`Failed to refresh latest prompt`);
+ return;
+ }
+
+ // Get the prompt we're replacing
+ const oldLatest = this.prompts[RevisionSpecialVersionsEnum.LATEST];
+
+ // Update the prompt
+ this.prompts[RevisionSpecialVersionsEnum.LATEST] = newLatest;
+
+ // Log if we're replacing an older version of the prompt
+ if (oldLatest && oldLatest.version !== newLatest.version) {
+ this.logger.info(
+ `Updated latest prompt from v${oldLatest.version} to v${newLatest.version}`,
+ );
+ }
+ } catch (err) {
+ this.logger.warn(`Failed to refresh latest prompt: ${err}`);
+ }
+ }
+
+ private async initUnsafe(): Promise<void> {
+ if (isTestingContext() && promptRevisionsMap()[this.id]) {
+ // Set the prompt revision override if we're in a testing context and a
+ // revision is set for this manager's prompt ID
+ const revisionId = promptRevisionsMap()[this.id];
+ await this.setPromptRevisionOverride({ revisionId });
+ return;
+ }
+
+ // Not in testing context or no revision override set, proceed as configured
+ const prompts = await Promise.all(
+ this.minorVersionsToRequest.map(async (minorVersion) => {
+ const prompt = await this.getPrompt({
+ minorVersion,
+ timeoutMs: this.initTimeoutMs,
+ throwOnError: true,
+ });
+ return [minorVersion, prompt] as const;
+ }),
+ );
+
+ // Make the map of minor version -> prompt
+ const promptsMap: Record<string, Prompt> = {};
+ prompts.forEach(([minorVersion, prompt]) => {
+ if (prompt) {
+ // NOTE: Use minorVersion from the `prompts` array, not `prompt.minorVersion`,
+ // since for `minorVersion=latest`, `prompt.minorVersion` will be the actual
+ // version of the prompt but we want to use `latest` as the key.
+ promptsMap[minorVersion] = prompt;
+ } else {
+ throw new Error(
+ `Failed to fetch version v${this.majorVersion}.${minorVersion}`,
+ );
+ }
+ });
+
+ // Set the prompts
+ this.prompts = promptsMap;
+ }
+
+ async init(): Promise<void> {
+ try {
+ await this.initUnsafe();
+ } catch (err) {
+ this.logger.error(`Failed to initialize prompt manager: ${err}`);
+ throw err;
+ }
+ this.logger.info('Successfully initialized prompt manager!');
+ }
+
+ close(): void {
+ if (this.refreshIntervalTimer) {
+ clearInterval(this.refreshIntervalTimer);
+ }
+ }
+
+ private chooseExecutionPrompt(): Prompt | undefined {
+ if (isTestingContext() && this.promptRevisionOverride) {
+ // Always use the prompt revision override if it is set
+ return this.promptRevisionOverride;
+ }
+ if (Array.isArray(this.minorVersion)) {
+ const weightTotal = this.minorVersion.reduce(
+ (acc, cur) => acc + cur.weight,
+ 0,
+ );
+ const rand = Math.random() * weightTotal;
+ let cur = 0;
+ for (const minor of this.minorVersion) {
+ cur += minor.weight;
+ if (rand < cur) {
+ return this.prompts[minor.version];
+ }
+ }
+
+ // We shouldn't reach this point, but just in case,
+ // return first in the weighted list.
+ return this.prompts[this.minorVersion[0].version];
+ } else {
greptile
logic: No validation that weights sum to a positive number. Could cause infinite loop or undefined behavior if all weights are 0 or negative.
suggested fix
if (Array.isArray(this.minorVersion)) {
const weightTotal = this.minorVersion.reduce(
(acc, cur) => acc + cur.weight,
0,
);
+ if (weightTotal <= 0) {
+ throw new Error('Weights must sum to a positive number');
}
const rand = Math.random() * weightTotal;
let cur = 0;
for (const minor of this.minorVersion) {
+ if (minor.weight < 0) {
+ throw new Error('Weights cannot be negative');
}
cur += minor.weight;
if (rand < cur) {
return this.prompts[minor.version];
}
}
// We shouldn't reach this point, but just in case,
// return first in the weighted list.
return this.prompts[this.minorVersion[0].version];
} else {
diff block
+import * as https from "https";
+import { parseStringPromise } from "xml2js";
+import { showToast, Toast, LocalStorage, getPreferenceValues } from "@raycast/api";
+
+export interface Article {
+ title: string;
+ link: string;
+ pubDate: string;
+ description: string;
+ creator?: string;
+ categories?: string[];
+ content?: string;
+ guid?: string;
+}
+
+interface CachedData {
+ articles: Article[];
+ timestamp: number;
+}
+
+// Cache expiration time (24 hours)
+const CACHE_EXPIRATION = 24 * 60 * 60 * 1000;
+
+// Format a date string
+export function formatDate(dateString: string): string {
+ const date = new Date(dateString);
+ return date.toLocaleDateString(undefined, {
+ year: "numeric",
+ month: "long",
+ day: "numeric",
+ hour: "2-digit",
+ minute: "2-digit",
+ });
+}
+
+// Truncate text to a specified length
+export function truncateText(text: string, maxLength: number): string {
+ if (!text) return "";
+
+ // Remove HTML tags
+ const plainText = text.replace(/<[^>]*>/g, "");
+
+ if (plainText.length <= maxLength) return plainText;
+
+ return plainText.substring(0, maxLength) + "...";
+}
+
+// Notify about article updates
+function notifyArticleUpdates(articles: Article[]) {
+ // This function is called when new articles are fetched
+ // It could be expanded to show notifications or update UI
+ console.log(`Updated articles: ${articles.length}`);
+}
+
+// Clear the cache
+async function clearCache() {
+ await LocalStorage.removeItem("cached_articles");
+ console.log("Cache cleared");
+}
+
+/**
+ * Performs an HTTP request and returns the response as a string
+ */
+export function fetchData(urlOrParams: string): Promise<string> {
+ return new Promise((resolve, reject) => {
+ // Determine if we're dealing with a full URL or just params
+ const isFullUrl = urlOrParams.startsWith("http");
+ const url = isFullUrl ? urlOrParams : `https://www.stadt-bremerhaven.de/feed/?${urlOrParams}`;
+
+ console.log(`Fetching: ${url}`);
+
+ const req = https.get(
+ url,
+ {
+ headers: {
+ "User-Agent": "Raycast-CaschysBlog-Extension/1.0",
+ Accept: "application/rss+xml, application/xml, text/xml, */*",
+ },
+ },
+ (res) => {
+ // Handle redirects manually
+ if (res.statusCode === 301 || res.statusCode === 302) {
+ if (res.headers.location) {
+ const redirectUrl = new URL(res.headers.location, url).toString();
+ console.log(`Following redirect to: ${redirectUrl}`);
+ return fetchData(redirectUrl).then(resolve).catch(reject);
+ }
+ }
greptile
logic: Potential infinite redirect loop if server keeps redirecting. Consider adding a max redirect limit
```suggestion
if (res.statusCode === 301 || res.statusCode === 302) {
if (res.headers.location) {
const redirectUrl = new URL(res.headers.location, url).toString();
console.log(`Following redirect to: ${redirectUrl}`);
+ // Add redirect count to prevent infinite loops
+ const redirectCount = Number(req.getHeader('x-redirect-count') || 0);
+ if (redirectCount >= 5) {
+ reject(new Error('Too many redirects'));
+ return;
}
return fetchData(redirectUrl).then(resolve).catch(reject);
}
}
```
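The counter in the suggestion above is read from a request header that nothing ever sets, so the limit would not actually trip. A sketch of carrying the redirect depth through the recursive call instead (simplified signature, not the extension's actual code):

```typescript
// Sketch only: thread an explicit redirect counter through the recursion.
import * as https from "https";

const MAX_REDIRECTS = 5;

function fetchData(url: string, redirectCount = 0): Promise<string> {
  return new Promise((resolve, reject) => {
    https
      .get(url, (res) => {
        if ((res.statusCode === 301 || res.statusCode === 302) && res.headers.location) {
          if (redirectCount >= MAX_REDIRECTS) {
            reject(new Error("Too many redirects"));
            return;
          }
          const redirectUrl = new URL(res.headers.location, url).toString();
          // Recurse with an incremented count so the limit is actually enforced.
          fetchData(redirectUrl, redirectCount + 1).then(resolve).catch(reject);
          return;
        }

        let body = "";
        res.on("data", (chunk) => (body += chunk.toString()));
        res.on("end", () => resolve(body));
      })
      .on("error", reject);
  });
}
```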
diff block
+import { Extension } from '@tiptap/core'
+import { getBlockInfoFromPos } from '../Blocks/helpers/getBlockInfoFromPos'
+
+declare module '@tiptap/core' {
+ interface Commands<ReturnType> {
+ diff: {
+ setDiff: (diff: 'added' | 'deleted' | 'updated' | 'undefined') => ReturnType
+ }
+ }
+}
+
+export const DiffExtension = Extension.create({
+ name: 'diff',
+
+ addGlobalAttributes() {
+ return [
+ {
+ // Attribute is applied to block content instead of container so that child blocks don't inherit the text
+ // alignment styling.
+ types: ['paragraph'],
+ attributes: {
+ diff: {
+ default: 'undefined',
+ parseHTML: (element) => element.getAttribute('data-diff'),
+ renderHTML: (attributes) =>
+ attributes.diff !== 'undefined' && {
+ 'data-diff': attributes.diff,
+ },
+ },
+ },
+ },
+ ]
+ },
+
+ addCommands() {
+ return {
+ setDiff:
+ (diff) =>
+ ({ state }) => {
+ const positionsBeforeSelectedContent = []
+
+ const blockInfo = getBlockInfoFromPos(state.doc, state.selection.from)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ // Finds all blockContent nodes that the current selection is in.
+ let pos = blockInfo.startPos
+ while (pos < state.selection.to) {
+ if (state.doc.resolve(pos).node().type.spec.group === 'blockContent') {
+ positionsBeforeSelectedContent.push(pos - 1)
+
+ pos += state.doc.resolve(pos).node().nodeSize - 1
+ } else {
greptile
logic: potential infinite loop if node structure is malformed - should add max iteration check
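A generic version of the guard the comment asks for: cap the iterations and require that every step moves the position forward. This is a sketch, independent of the ProseMirror document structure above:

```typescript
// Sketch only: a bounded position walk that refuses to spin in place.
function walkPositions(
  from: number,
  to: number,
  step: (pos: number) => number, // returns the next position to visit
  maxIterations = 10_000,
): number[] {
  const visited: number[] = [];
  let pos = from;

  for (let i = 0; pos < to; i++) {
    if (i >= maxIterations) {
      throw new Error(`walkPositions: gave up after ${maxIterations} iterations`);
    }
    visited.push(pos);
    const next = step(pos);
    if (next <= pos) {
      // A malformed document could make the step fail to advance; stop instead of looping.
      throw new Error("walkPositions: step did not advance the position");
    }
    pos = next;
  }
  return visited;
}
```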
diff block
+import { Node, NodeType } from 'prosemirror-model'
+
+export type BlockInfoWithoutPositions = {
+ id: string
+ node: Node
+ contentNode: Node
+ contentType: NodeType
+ numChildBlocks: number
+}
+
+export type BlockInfo = BlockInfoWithoutPositions & {
+ startPos: number
+ endPos: number
+ depth: number
+}
+
+/**
+ * Helper function for `getBlockInfoFromPos`, returns information regarding
+ * provided blockContainer node.
+ * @param blockContainer The blockContainer node to retrieve info for.
+ */
+export function getBlockInfo(blockContainer: Node): BlockInfoWithoutPositions {
+ const id = blockContainer.attrs['id']
+ const contentNode = blockContainer.firstChild!
+ const contentType = contentNode.type
+ const numChildBlocks = blockContainer.childCount === 2 ? blockContainer.lastChild!.childCount : 0
+
+ return {
+ id,
+ node: blockContainer,
+ contentNode,
+ contentType,
+ numChildBlocks,
+ }
+}
+
+/**
+ *
+ * Retrieves information regarding the nearest blockContainer node in a
+ * ProseMirror doc, relative to a position.
+ * @param doc The ProseMirror doc.
+ * @param pos An integer position.
+ * @returns A BlockInfo object for the nearest blockContainer node.
+ */
+export function getBlockInfoFromPos(doc: Node, pos: number): BlockInfo {
+ // If the position is outside the outer block group, we need to move it to the
+ // nearest block. This happens when the collaboration plugin is active, where
+ // the selection is placed at the very end of the doc.
+ const outerBlockGroupStartPos = 1
+ const outerBlockGroupEndPos = doc.nodeSize - 2
+ if (pos <= outerBlockGroupStartPos) {
+ pos = outerBlockGroupStartPos + 1
+
+ while (doc.resolve(pos).parent.type.name !== 'blockContainer' && pos < outerBlockGroupEndPos) {
+ pos++
+ }
greptile
logic: Potential infinite loop if no blockContainer is found before reaching outerBlockGroupEndPos
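One way to address the concern is to make both the scan bound and the not-found case explicit; a sketch, with a helper name of our own rather than the library's:

```typescript
// Sketch only: bounded scan that reports when no blockContainer is found.
import type { Node } from "prosemirror-model";

function findBlockContainerPos(doc: Node, startPos: number, endPos: number): number {
  for (let pos = startPos; pos < endPos; pos++) {
    if (doc.resolve(pos).parent.type.name === "blockContainer") {
      return pos;
    }
  }
  // Reaching this point means the range contained no blockContainer;
  // surface that instead of silently using the last scanned position.
  throw new Error(`No blockContainer found between ${startPos} and ${endPos}`);
}
```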
diff block
+import { Node, ResolvedPos } from '@tiptap/pm/model'
+import { EditorState } from 'prosemirror-state'
+
+export type GroupInfo = {
+ group: Node
+ container?: Node
+ depth: number
+ level: number
+ $pos: ResolvedPos
+}
+
+export function getGroupInfoFromPos(pos: number, state: EditorState): GroupInfo {
+ const $pos = state.doc.resolve(pos)
+ const maxDepth = $pos.depth
+ // Set group to first node found at position
+ let group = $pos.node(maxDepth)
+ let container
+ let depth = maxDepth
+
+ // Find block group, block container and depth it is at
+ while (true) {
+ if (depth < 0) {
+ break
+ }
greptile
logic: Infinite loop risk if document structure is malformed and no blockGroup is found. Consider adding a safety check on max iterations.
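One way to add the requested safety check is to bound the walk by the resolved depth itself instead of using `while (true)`; a sketch assuming the group node is named `blockGroup`, as the surrounding code suggests:

```typescript
// Sketch only: walk ancestor depths downward; the loop is bounded by $pos.depth.
import type { Node } from "prosemirror-model";
import type { EditorState } from "prosemirror-state";

function findBlockGroup(state: EditorState, pos: number): { group: Node; depth: number } {
  const $pos = state.doc.resolve(pos);

  for (let depth = $pos.depth; depth >= 0; depth--) {
    const node = $pos.node(depth);
    if (node.type.name === "blockGroup") {
      return { group: node, depth };
    }
  }
  throw new Error("No blockGroup found above the given position");
}
```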
diff block
+import { Editor, mergeAttributes, Node, textblockTypeInputRule } from '@tiptap/core'
+import { Fragment, Slice } from '@tiptap/pm/model'
+import { Plugin, PluginKey, TextSelection } from '@tiptap/pm/state'
+import styles from '@/lib/blocknote/core/extensions/Blocks/nodes/Block.module.css'
+import { BlockNoteDOMAttributes, getBlockInfoFromPos, mergeCSSClasses } from '../blocknote'
+import { getGroupInfoFromPos } from '../blocknote/core/extensions/Blocks/helpers/getGroupInfoFromPos'
+
+declare module '@tiptap/core' {
+ interface Commands<ReturnType> {
+ 'code-block': {
+ /**
+ * Set a code block
+ */
+ setCodeBlock: (attributes?: { language: string }) => ReturnType
+ /**
+ * Toggle a code block
+ */
+ toggleCodeBlock: (attributes?: { language: string }) => ReturnType
+ }
+ }
+}
+
+export const backtickInputRegex = /^```([a-z]+)?[\s\n]$/
+export const tildeInputRegex = /^~~~([a-z]+)?[\s\n]$/
+
+export interface CodeBlockOptions {
+ /**
+ * Adds a prefix to language classes that are applied to code tags.
+ * Defaults to `'language-'`.
+ */
+ languageClassPrefix: string
+ /**
+ * BlockNote's default DOM attributes
+ */
+ domAttributes?: BlockNoteDOMAttributes
+}
+
+export const CodeBlock = Node.create<CodeBlockOptions>({
+ name: 'code-block',
+
+ addOptions() {
+ return {
+ languageClassPrefix: 'language-',
+ domAttributes: {},
+ }
+ },
+
+ content: 'text*',
+
+ marks: '',
+
+ group: 'blockContent',
+
+ code: true,
+
+ defining: true,
+
+ addAttributes() {
+ return {
+ language: {
+ default: '',
+ parseHTML: (element) => {
+ const { languageClassPrefix } = this.options
+ const getClassNames = (classList: DOMTokenList) => Array.from(classList || [])
+
+ const classNames = [
+ ...getClassNames(element.classList),
+ ...getClassNames(element.firstElementChild?.classList || new DOMTokenList()),
+ ]
+ const languages = classNames
+ .filter((className) => className.startsWith(languageClassPrefix))
+ .map((className) => className.replace(languageClassPrefix, ''))
+ const language = languages[0]
+
+ if (!language) {
+ return ''
+ }
+
+ return language
+ },
+ rendered: false,
+ },
+ }
+ },
+
+ parseHTML() {
+ return [
+ {
+ tag: 'pre',
+ preserveWhitespace: 'full',
+ },
+ ]
+ },
+
+ renderHTML({ HTMLAttributes, node }) {
+ const blockContentDOMAttributes = this.options.domAttributes?.blockContent || {}
+ const inlineContentDOMAttributes = this.options.domAttributes?.inlineContent || {}
+
+ return [
+ 'pre',
+ mergeAttributes(HTMLAttributes, {
+ ...blockContentDOMAttributes,
+ class: mergeCSSClasses(
+ styles.blockContent,
+ blockContentDOMAttributes.class,
+ node.attrs.language.length ? this.options.languageClassPrefix + node.attrs.language : '',
+ ),
+ 'data-content-type': this.name,
+ 'data-language': HTMLAttributes.language,
+ }),
+ [
+ 'code',
+ {
+ ...inlineContentDOMAttributes,
+ class: mergeCSSClasses(styles.inlineContent, inlineContentDOMAttributes.class),
+ },
+ 0,
+ ],
+ ]
+ },
+
+ addCommands() {
+ return {
+ setCodeBlock:
+ (attributes) =>
+ ({ commands }) => {
+ return commands.setNode(this.name, attributes)
+ },
+ toggleCodeBlock:
+ (attributes) =>
+ ({ commands }) => {
+ return commands.toggleNode(this.name, 'paragraph', attributes)
+ },
+ }
+ },
+
+ addKeyboardShortcuts() {
+ function splitCodeBlock(editor: Editor) {
+ const { state } = editor
+ const codePos = state.doc.resolve(state.selection.$from.pos)
+ const blockInfo = getBlockInfoFromPos(state.doc, codePos.pos)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { depth } = blockInfo
+
+ const originalBlockContent = state.doc.cut(codePos.start(), codePos.pos)
+ const newBlockContent = state.doc.cut(codePos.pos, codePos.end())
+
+ const newBlock = state.schema.nodes.blockContainer.createAndFill()!
+ const nextBlockPos = codePos.end() + 2
+ const nextBlockContentPos = nextBlockPos + 2
+
+ let tr = state.tr
+ tr = tr.insert(nextBlockPos, newBlock)
+ tr = tr.replace(
+ nextBlockContentPos,
+ nextBlockContentPos + 1,
+ newBlockContent.content.size > 0 ? new Slice(Fragment.from(newBlockContent), depth + 2, depth + 2) : undefined,
+ )
+ tr = tr.replace(
+ codePos.start(),
+ codePos.end(),
+ originalBlockContent.content.size > 0
+ ? new Slice(Fragment.from(originalBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+
+ editor.view.dispatch(tr)
+
+ let blockContentLength = 0
+ if (newBlockContent.textContent) blockContentLength = newBlockContent.textContent.length
+ editor.commands.setTextSelection(nextBlockContentPos - blockContentLength)
+
+ return true
+ }
+
+ return {
+ 'Mod-Alt-c': () => this.editor.commands.toggleCodeBlock(),
+
+ // Split code block's content on current selection and move other content to the next block.
+ 'Shift-Enter': ({ editor }) => splitCodeBlock(editor),
+ 'Mod-Enter': ({ editor }) => splitCodeBlock(editor),
+
+ // remove code block when at start of document or code block is empty
+ Backspace: () => {
+ const { empty, $anchor } = this.editor.state.selection
+ const isAtStart = $anchor.pos === 1
+
+ if (!empty || $anchor.parent.type.name !== this.name) {
+ return false
+ }
+
+ if (isAtStart || !$anchor.parent.textContent.length) {
+ return this.editor.commands.clearNodes()
+ }
+
+ return false
+ },
+
+ // remove double space (if any) from the current line on shift+tab click
+ 'Shift-Tab': ({ editor }) => {
+ const { state, view } = editor
+ const { selection } = state
+ const { $from, $to, empty } = selection
+
+ if ($from.parent.type !== this.type) {
+ return false
+ }
+
+ const codePos = state.doc.resolve($from.pos)
+
+ if (codePos.pos === codePos.start() && empty) {
+ return false
+ }
+
+ const codeBlock = codePos.parent
+ let currentPosInBlock = codePos.pos - codePos.start()
+ let currentChar: string = ''
+ const tabSpace = ' '
+
+ do {
+ currentPosInBlock--
+
+ currentChar = codeBlock.textBetween(currentPosInBlock, currentPosInBlock + 1)
+ } while (currentChar !== '\n' && currentPosInBlock !== -1)
greptile
logic: Potential infinite loop if currentPosInBlock becomes negative while currentChar is not a newline
suggested fix
do {
currentPosInBlock--
+ currentChar = currentPosInBlock >= 0 ? codeBlock.textBetween(currentPosInBlock, currentPosInBlock + 1) : '\n'
+ } while (currentPosInBlock >= 0 && currentChar !== '\n')
diff block
export async function pollWithDelay<T>(
asyncFn: (...args: any[]) => Promise<T>,
args: any[],
- delay = 1000,
+ delay: number,
condition: PollerCondition<T> = () => true
) {
- const delayInMs = delay * 10000;
+ const delayInMs = delay * 1000;
let result: T | undefined;
while (condition(result)) {
greptile
logic: Infinite loop risk - condition(result) will always be true if result is undefined and condition defaults to () => true
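A sketch of the same helper with an explicit attempt cap; the default condition here assumes "keep polling until a result arrives", which is our reading rather than the original intent:

```typescript
// Sketch only: bounded polling so a condition that never flips cannot hang the caller.
async function pollWithLimit<T>(
  asyncFn: () => Promise<T>,
  delayMs: number,
  shouldContinue: (result: T | undefined) => boolean = (r) => r === undefined,
  maxAttempts = 60,
): Promise<T | undefined> {
  let result: T | undefined;

  for (let attempt = 0; attempt < maxAttempts && shouldContinue(result); attempt++) {
    result = await asyncFn();
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
  return result;
}
```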
diff block
+import asyncio
+import dataclasses
+import datetime as dt
+import json
+import typing
+
+import temporalio.activity
+import temporalio.common
+import temporalio.workflow
+
+from posthog.clickhouse.client import sync_execute
+from posthog.temporal.common.base import PostHogWorkflow
+from posthog.temporal.common.heartbeat import Heartbeater
+from posthog.temporal.common.logger import get_internal_logger
+
+
+def get_session_replay_events(
+ table_name: str,
+ started_after: dt.datetime,
+ started_before: dt.datetime,
+) -> list[tuple]:
+ """Get session replay events from the specified table within the time range."""
+ query = """
+ SELECT
+ session_id,
+ any(distinct_id) as distinct_id,
+ min(min_first_timestamp) as min_first_timestamp,
+ max(max_last_timestamp) as max_last_timestamp,
+ argMinMerge(first_url) as first_url,
+ groupUniqArrayArray(all_urls) as all_urls,
+ sum(click_count) as click_count,
+ sum(keypress_count) as keypress_count,
+ sum(mouse_activity_count) as mouse_activity_count,
+ sum(active_milliseconds) as active_milliseconds,
+ sum(console_log_count) as console_log_count,
+ sum(console_warn_count) as console_warn_count,
+ sum(console_error_count) as console_error_count,
+ sum(size) as size,
+ sum(message_count) as message_count,
+ sum(event_count) as event_count,
+ argMinMerge(snapshot_source) as snapshot_source,
+ argMinMerge(snapshot_library) as snapshot_library
+ {block_fields}
+ FROM
+ {table}
+ GROUP BY
+ session_id
+ HAVING
+ min_first_timestamp >= toDateTime(%(started_after)s)
+ AND min_first_timestamp <= toDateTime(%(started_before)s)
+ """
+
+ # Add block-related fields only for v2 table
+ block_fields = (
+ """
+ ,groupArrayArray(block_first_timestamps) as block_first_timestamps,
+ groupArrayArray(block_last_timestamps) as block_last_timestamps,
+ groupArrayArray(block_urls) as block_urls
+ """
+ if "_v2_" in table_name
+ else ""
+ )
+
+ return sync_execute(
+ query.format(table=table_name, block_fields=block_fields),
+ {
+ "started_after": started_after.strftime("%Y-%m-%d %H:%M:%S"),
+ "started_before": started_before.strftime("%Y-%m-%d %H:%M:%S"),
+ },
+ )
+
+
+FIELD_NAMES = [
+ "distinct_id",
+ "min_first_timestamp",
+ "max_last_timestamp",
+ "first_url",
+ "all_urls",
+ "click_count",
+ "keypress_count",
+ "mouse_activity_count",
+ "active_milliseconds",
+ "console_log_count",
+ "console_warn_count",
+ "console_error_count",
+ "size",
+ "message_count",
+ "event_count",
+ "snapshot_source",
+ "snapshot_library",
+]
+
+
+@dataclasses.dataclass(frozen=True)
+class CompareRecordingMetadataActivityInputs:
+ """Inputs for the `compare_recording_metadata_activity`."""
+
+ started_after: str = dataclasses.field()
+ started_before: str = dataclasses.field()
+
+ @property
+ def properties_to_log(self) -> dict[str, typing.Any]:
+ return {
+ "started_after": self.started_after,
+ "started_before": self.started_before,
+ }
+
+
+@temporalio.activity.defn
+async def compare_recording_metadata_activity(inputs: CompareRecordingMetadataActivityInputs) -> None:
+ """Compare session recording metadata between storage backends."""
+ async with Heartbeater():
+ logger = get_internal_logger()
+ started_after = dt.datetime.fromisoformat(inputs.started_after)
+ started_before = dt.datetime.fromisoformat(inputs.started_before)
+
+ results_v1 = get_session_replay_events(
+ "session_replay_events",
+ started_after,
+ started_before,
+ )
+ results_v2 = get_session_replay_events(
+ "session_replay_events_v2_test",
+ started_after,
+ started_before,
+ )
+
+ await logger.ainfo(
+ "Found %d session recordings in v1 and %d in v2 that started between %s and %s",
+ len(results_v1),
+ len(results_v2),
+ started_after,
+ started_before,
+ )
+
+ # Create lookup tables for easier comparison
+ v1_sessions = {r[0]: r for r in results_v1} # session_id -> full record
+ v2_sessions = {r[0]: r for r in results_v2}
+
+ # Find sessions in v1 but not in v2
+ only_in_v1 = set(v1_sessions.keys()) - set(v2_sessions.keys())
+ if only_in_v1:
+ await logger.ainfo("Sessions only in v1: %s", only_in_v1)
+
+ # Find sessions in v2 but not in v1
+ only_in_v2 = set(v2_sessions.keys()) - set(v1_sessions.keys())
+ if only_in_v2:
+ await logger.ainfo("Sessions only in v2: %s", only_in_v2)
+
+ # Compare data for sessions in both
+ for session_id in set(v1_sessions.keys()) & set(v2_sessions.keys()):
+ v1_data = v1_sessions[session_id]
+ v2_data = v2_sessions[session_id]
+
+ # Compare each field and collect differences
+ differences = []
+ for i, field_name in enumerate(FIELD_NAMES, start=1): # start=1 because session_id is at index 0
+ if v1_data[i] != v2_data[i]:
+ differences.append(f"{field_name}: v1={v1_data[i]} v2={v2_data[i]}")
+
+ if differences:
+ await logger.ainfo("Session %s differences:\n%s", session_id, "\n".join(differences))
+
+
+@dataclasses.dataclass(frozen=True)
+class CompareRecordingMetadataWorkflowInputs:
+ """Inputs for the `CompareRecordingMetadataWorkflow`."""
+
+ started_after: str = dataclasses.field()
+ started_before: str = dataclasses.field()
+ window_seconds: int = dataclasses.field(default=300) # 5 minutes default
+
+ @property
+ def properties_to_log(self) -> dict[str, typing.Any]:
+ return {
+ "started_after": self.started_after,
+ "started_before": self.started_before,
+ "window_seconds": self.window_seconds,
+ }
+
+
+@temporalio.workflow.defn(name="compare-recording-metadata")
+class CompareRecordingMetadataWorkflow(PostHogWorkflow):
+ """Workflow to compare session recording metadata between storage backends."""
+
+ def __init__(self) -> None:
+ self.lock = asyncio.Lock()
+ self.paused = False
+
+ @staticmethod
+ def parse_inputs(inputs: list[str]) -> CompareRecordingMetadataWorkflowInputs:
+ """Parse inputs from the management command CLI."""
+ loaded = json.loads(inputs[0])
+ # Convert ISO format strings to datetime objects
+ for field in ["started_after", "started_before"]:
+ if field not in loaded:
+ raise ValueError(f"Required field {field} not provided")
+ loaded[field] = dt.datetime.fromisoformat(loaded[field])
+
+ # Optional window_seconds with default
+ window_seconds = loaded.get("window_seconds", 300)
+ if not isinstance(window_seconds, int) or window_seconds <= 0:
+ raise ValueError("window_seconds must be a positive integer")
+
+ return CompareRecordingMetadataWorkflowInputs(
+ started_after=loaded["started_after"],
+ started_before=loaded["started_before"],
+ window_seconds=window_seconds,
+ )
+
+ @staticmethod
+ def generate_time_windows(
+ start_time: dt.datetime, end_time: dt.datetime, window_seconds: int
+ ) -> list[tuple[dt.datetime, dt.datetime]]:
+ """Generate time windows between start and end time."""
+ windows = []
+ current = start_time
+
+ while current < end_time:
+ window_end = min(current + dt.timedelta(seconds=window_seconds), end_time)
+ windows.append((current, window_end))
+ current = window_end
+
+ return windows
+
+ @temporalio.workflow.run
+ async def run(self, inputs: CompareRecordingMetadataWorkflowInputs):
+ """Run the comparison of session recording metadata."""
+ await temporalio.workflow.wait_condition(lambda: not self.paused)
+
+ started_after = dt.datetime.fromisoformat(inputs.started_after)
+ started_before = dt.datetime.fromisoformat(inputs.started_before)
+
+ logger = get_internal_logger()
+ logger.info(
+ "Starting comparison for sessions between %s and %s using %d second windows",
+ started_after,
+ started_before,
+ inputs.window_seconds,
+ )
+
+ # Generate time windows
+ windows = self.generate_time_windows(started_after, started_before, inputs.window_seconds)
+
+ # Process each window
+ for window_start, window_end in windows:
+ activity_inputs = CompareRecordingMetadataActivityInputs(
+ started_after=window_start.isoformat(),
+ started_before=window_end.isoformat(),
+ )
+
+ await temporalio.workflow.execute_activity(
+ compare_recording_metadata_activity,
+ activity_inputs,
+ start_to_close_timeout=dt.timedelta(minutes=5),
+ retry_policy=temporalio.common.RetryPolicy(
+ initial_interval=dt.timedelta(seconds=10),
+ maximum_interval=dt.timedelta(seconds=60),
+ maximum_attempts=0,
+ non_retryable_error_types=[],
+ ),
greptile
logic: Setting maximum_attempts to 0 means infinite retries. Consider setting a reasonable maximum retry limit to prevent infinite loops on persistent failures.
diff block
+// Copyright 2025 OpenObserve Inc.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+
+use config::RwAHashMap;
+
+use super::{
+ config::*,
+ connection::{Connection, QuerierConnection},
+ error::*,
+ handler::QuerierName,
+};
+
+#[derive(Debug)]
+pub struct QuerierConnectionPool {
+ connections: RwAHashMap<QuerierName, Arc<QuerierConnection>>,
+ config: WsConfig,
+}
+
+impl QuerierConnectionPool {
+ pub fn new(config: WsConfig) -> Self {
+ Self {
+ connections: RwAHashMap::default(),
+ config,
+ }
+ }
+
+ pub async fn get_or_create_connection(
+ &self,
+ querier_name: &QuerierName,
+ ) -> WsResult<Arc<QuerierConnection>> {
+ if let Some(conn) = self.connections.read().await.get(querier_name) {
+ // double check if the connection is still connected
+ return if conn.is_connected().await {
+ Ok(conn.clone())
+ } else {
+ Err(WsError::ConnectionDisconnected)
+ };
+ }
+
+ // Create new connection
+ let conn = super::connection::create_connection(querier_name).await?;
+ self.connections
+ .write()
+ .await
+ .insert(querier_name.to_string(), conn.clone());
+ Ok(conn)
+ }
+
+ pub async fn remove_querier_connection(&self, querier_name: &QuerierName) {
+ if let Some(conn) = self.connections.write().await.remove(querier_name) {
+ log::warn!("[WS::ConnectionPool] removing connection to querier {querier_name}");
+ conn.disconnect().await;
+ }
+ }
+
+ pub async fn maintain_connections(&self) {
+ loop {
+ let mut to_remove = Vec::new();
+
+ let read_guard = self.connections.read().await;
+ for (querier_name, conn) in read_guard.iter() {
+ if !conn.is_connected().await {
+ // Just drop it. A new connection will be made to the querier when chosen again
+ to_remove.push(querier_name.clone());
+ }
+ }
+ drop(read_guard);
+
+ // Remove connections that failed to reconnect
+ for querier in to_remove {
+ log::warn!(
+ "[WS::ConnectionPoll] Removing disconnected connection to querier: {}",
+ querier
+ );
+ self.remove_querier_connection(&querier).await;
+ }
+
+ tokio::time::sleep(std::time::Duration::from_secs(
+ // TODO: need to use another env to set the interval
+ self.config.health_check_config.interval_secs,
+ ))
+ .await;
+ }
+ }
greptile
style: maintain_connections runs in an infinite loop without any shutdown mechanism. Should have a way to gracefully terminate this background task when the pool is shutting down.
diff block
loadSearchResults: (searchTerm: string, offset = 0) => ({ searchTerm, offset }),
}),
loaders(({ actions, values }) => ({
- allUnfiledItems: [
+ unfiledItems: [
[] as FileSystemEntry[],
{
loadUnfiledItems: async () => {
const response = await api.fileSystem.unfiled()
- return [...values.allUnfiledItems, ...response.results]
+ return [...values.unfiledItems, ...response.results]
},
greptile
logic: potential infinite loop - appending to values.unfiledItems which is the same array being loaded
suggested fix
+ loadUnfiledItems: async () => {
+ const response = await api.fileSystem.unfiled()
+ return response.results
+ },
diff block
+import { List, showToast, Toast, ActionPanel, Action, getPreferenceValues } from "@raycast/api";
+import { useState, useEffect } from "react";
+import { OAuthManager } from "./services/auth/OAuthManager";
+import { Icon } from "@raycast/api";
+import { CameraList } from "./components/CameraList";
+import { NestCamera } from "./types";
+import { NestDeviceService } from "./services/camera/NestDeviceService";
+import { logEnvironment } from "./debug-env";
+
+export default function Command() {
+ const [cameras, setCameras] = useState<NestCamera[]>([]);
+ const [isLoading, setIsLoading] = useState(true);
+ const [isAuthenticated, setIsAuthenticated] = useState(false);
+ const [needsConfiguration, setNeedsConfiguration] = useState(false);
+
+ useEffect(() => {
+ const checkAuth = async () => {
+ try {
+ // Log environment variables for debugging
+ const logResult = logEnvironment();
+ console.log(logResult);
+
+ const authManager = OAuthManager.getInstance();
+ // Check if we have a valid token
+ try {
+ await authManager.getValidToken();
+ setIsAuthenticated(true);
+ } catch (e) {
+ setIsAuthenticated(false);
+ }
+
+ // Check if configuration is valid
+ const config = getPreferenceValues();
+ if (!config.clientId || !config.projectId) {
+ setNeedsConfiguration(true);
+ setIsLoading(false);
+ return;
+ }
+
+ if (isAuthenticated) {
+ await fetchCameras();
+ } else {
+ setIsLoading(false);
+ }
+ } catch (e) {
+ console.error("Auth check error:", e);
+ showToast({
+ style: Toast.Style.Failure,
+ title: "Authentication Error",
+ message: e instanceof Error ? e.message : "Unknown error",
+ });
+ setIsLoading(false);
+ }
+ };
+
+ checkAuth();
+
+ return () => {
+ // Cleanup
+ const deviceService = NestDeviceService.getInstance();
+ deviceService.cleanup();
+ };
+ }, [isAuthenticated]);
greptile
logic: Potential infinite loop risk. The useEffect depends on isAuthenticated, but inside it you're setting isAuthenticated, which will trigger the effect again. Consider using a ref or another approach to prevent unnecessary re-renders.
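A sketch of one way to break the cycle: run the effect once on mount and act on the freshly resolved auth value rather than on the `isAuthenticated` state the effect also sets (`checkToken` and `fetchCameras` are stand-ins for the extension's calls):

```typescript
// Sketch only: resolve auth locally and use the local value directly,
// instead of depending on the state that this same effect sets.
import { useEffect, useState } from "react";

function useCameraBootstrap(
  checkToken: () => Promise<boolean>,
  fetchCameras: () => Promise<void>,
) {
  const [isAuthenticated, setIsAuthenticated] = useState(false);
  const [isLoading, setIsLoading] = useState(true);

  useEffect(() => {
    let cancelled = false;

    (async () => {
      const ok = await checkToken(); // resolved once, used directly below
      if (cancelled) return;
      setIsAuthenticated(ok);
      if (ok) await fetchCameras();
      if (!cancelled) setIsLoading(false);
    })();

    return () => {
      cancelled = true;
    };
  }, []); // run once on mount; no isAuthenticated dependency to re-trigger it

  return { isAuthenticated, isLoading };
}
```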
diff block
+// SPDX-FileCopyrightText: Copyright (c) 2016-2024, CloudZero, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package backfiller provides functionality to backfill Kubernetes resources and store them in a specified storage.
+// This package is designed to gather data from various Kubernetes resources such as namespaces, pods, deployments,
+// statefulsets, daemonsets, jobs, cronjobs, and nodes. The gathered data is then formatted and stored using a
+// resource store interface. This business logic layer is essential for maintaining an up-to-date inventory of
+// Kubernetes resources, which can be used for monitoring, auditing, and analysis purposes.
+//
+// The Backfiller struct is the main component of this package, which is initialized with a Kubernetes client,
+// resource store, and configuration settings. The Start method begins the scraping process, iterating through
+// all namespaces and collecting data from the specified resources based on the provided filters.
+//
+// The package also includes helper functions such as writeResources and writeNodes to handle the listing and
+// storing of resources in a paginated manner, ensuring efficient data retrieval and storage.
+//
+// This package is valuable for organizations that need to keep track of their Kubernetes resources and ensure
+// that their inventory is always up-to-date. It provides a robust and flexible solution for scraping and storing
+// Kubernetes resource data.
+package backfiller
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/rs/zerolog/log"
+ appsv1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+
+ config "github.com/cloudzero/cloudzero-agent-validator/app/config/insights-controller"
+ "github.com/cloudzero/cloudzero-agent-validator/app/http/handler"
+ "github.com/cloudzero/cloudzero-agent-validator/app/types"
+)
+
+type Backfiller struct {
+ k8sClient kubernetes.Interface
+ settings *config.Settings
+ store types.ResourceStore
+}
+
+func NewBackfiller(k8sClient kubernetes.Interface, store types.ResourceStore, settings *config.Settings) *Backfiller {
+ return &Backfiller{
+ k8sClient: k8sClient,
+ settings: settings,
+ store: store,
+ }
+}
+
+func (s *Backfiller) Start(ctx context.Context) {
+ var _continue string
+ allNamespaces := []corev1.Namespace{}
+ log.Info().
+ Time("current_time", time.Now().UTC()).
+ Msg("Starting backfill of existing resources")
+
+ // write all nodes in the cluster storage
+ s.writeNodes(ctx)
+
+ for {
+ // List all namespaces
+ namespaces, err := s.k8sClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{
+ Limit: s.settings.K8sClient.PaginationLimit,
+ Continue: _continue,
+ })
+ if err != nil {
+ log.Err(err).Msg("Error listing namespaces")
+ return
+ }
+ allNamespaces = append(allNamespaces, namespaces.Items...)
+
+ // For each namespace, gather all resources
+ for _, ns := range namespaces.Items {
+ log.Info().Str("namespace", ns.Name).Msg("Scraping data from namespace")
+ // write namespace record
+ nr := handler.FormatNamespaceData(&ns, s.settings)
+ if err := s.store.Create(ctx, &nr); err != nil {
+ log.Err(err).Msg("failed to write data to storage")
+ }
+
+ // write all pods in the namespace storage
+ if s.settings.Filters.Labels.Resources.Pods || s.settings.Filters.Annotations.Resources.Pods { //nolint:dupl // code is similar, but not duplicated
+ writeResources(ctx, s.store, ns.Name, func(namespace string, opts metav1.ListOptions) (metav1.ListInterface, error) {
+ return s.k8sClient.CoreV1().Pods(namespace).List(ctx, opts)
+ }, func(obj any, settings *config.Settings) (types.ResourceTags, error) {
+ data, ok := obj.(*corev1.Pod)
+ if !ok {
+ return types.ResourceTags{}, fmt.Errorf("type mismatch: wanted corev1.Pod, got %s", reflect.TypeOf(obj))
+ }
+ return handler.FormatPodData(data, settings), nil
+ }, s.settings)
+ }
+
+ // write all deployments in the namespace storage
+ if s.settings.Filters.Labels.Resources.Deployments || s.settings.Filters.Annotations.Resources.Deployments { //nolint:dupl // code is similar, but not duplicated
+ writeResources(ctx, s.store, ns.Name, func(namespace string, opts metav1.ListOptions) (metav1.ListInterface, error) {
+ return s.k8sClient.AppsV1().Deployments(namespace).List(ctx, opts)
+ }, func(obj any, settings *config.Settings) (types.ResourceTags, error) {
+ data, ok := obj.(*appsv1.Deployment)
+ if !ok {
+ return types.ResourceTags{}, fmt.Errorf("type mismatch: wanted appsv1.Deployment, got %s", reflect.TypeOf(obj))
+ }
+ return handler.FormatDeploymentData(data, settings), nil
+ }, s.settings)
+ }
+
+ // write all statefulsets in the namespace storage
+ if s.settings.Filters.Labels.Resources.StatefulSets || s.settings.Filters.Annotations.Resources.StatefulSets { //nolint:dupl // code is similar, but not duplicated
+ writeResources(ctx, s.store, ns.Name, func(namespace string, opts metav1.ListOptions) (metav1.ListInterface, error) {
+ return s.k8sClient.AppsV1().StatefulSets(namespace).List(ctx, opts)
+ }, func(obj any, settings *config.Settings) (types.ResourceTags, error) {
+ data, ok := obj.(*appsv1.StatefulSet)
+ if !ok {
+ return types.ResourceTags{}, fmt.Errorf("type mismatch: wanted appsv1.StatefulSet, got %s", reflect.TypeOf(obj))
+ }
+ return handler.FormatStatefulsetData(data, settings), nil
+ }, s.settings)
+ }
+
+ // write all daemonsets in the namespace storage
+ if s.settings.Filters.Labels.Resources.DaemonSets || s.settings.Filters.Annotations.Resources.DaemonSets { //nolint:dupl // code is similar, but not duplicated
+ writeResources(ctx, s.store, ns.Name, func(namespace string, opts metav1.ListOptions) (metav1.ListInterface, error) {
+ return s.k8sClient.AppsV1().DaemonSets(namespace).List(ctx, opts)
+ }, func(obj any, settings *config.Settings) (types.ResourceTags, error) {
+ data, ok := obj.(*appsv1.DaemonSet)
+ if !ok {
+ return types.ResourceTags{}, fmt.Errorf("type mismatch: wanted appsv1.DaemonSet, got %s", reflect.TypeOf(obj))
+ }
+ return handler.FormatDaemonSetData(data, settings), nil
+ }, s.settings)
+ }
+
+ // write all jobs in the namespace storage
+ if s.settings.Filters.Labels.Resources.Jobs || s.settings.Filters.Annotations.Resources.Jobs { //nolint:dupl // code is similar, but not duplicated
+ writeResources(ctx, s.store, ns.Name, func(namespace string, opts metav1.ListOptions) (metav1.ListInterface, error) {
+ return s.k8sClient.BatchV1().Jobs(namespace).List(ctx, opts)
+ }, func(obj any, settings *config.Settings) (types.ResourceTags, error) {
+ data, ok := obj.(*batchv1.Job)
+ if !ok {
+ return types.ResourceTags{}, fmt.Errorf("type mismatch: wanted batchv1.Job, got %s", reflect.TypeOf(obj))
+ }
+ return handler.FormatJobData(data, settings), nil
+ }, s.settings)
+ }
+
+ // write all cronjobs in the namespace storage
+ if s.settings.Filters.Labels.Resources.CronJobs || s.settings.Filters.Annotations.Resources.CronJobs { //nolint:dupl // code is similar, but not duplicated
+ writeResources(ctx, s.store, ns.Name, func(namespace string, opts metav1.ListOptions) (metav1.ListInterface, error) {
+ return s.k8sClient.BatchV1().CronJobs(namespace).List(ctx, opts)
+ }, func(obj any, settings *config.Settings) (types.ResourceTags, error) {
+ data, ok := obj.(*batchv1.CronJob)
+ if !ok {
+ return types.ResourceTags{}, fmt.Errorf("type mismatch: wanted batchv1.CronJob, got %s", reflect.TypeOf(obj))
+ }
+ return handler.FormatCronJobData(data, settings), nil
+ }, s.settings)
+ }
+
+ }
+ if namespaces.GetContinue() == "" {
+ log.Info().
+ Time("current_time", time.Now().UTC()).
+ Int("namespaces_count", len(allNamespaces)).
+ Msg("Backfill operation completed")
+ break
+ }
+ _continue = namespaces.GetContinue()
+ }
+}
+
+func writeResources[T metav1.ListInterface](
+ ctx context.Context,
+ store types.ResourceStore,
+ namespace string,
+ listFunc func(string, metav1.ListOptions) (T, error),
+ formatFunc func(any, *config.Settings) (types.ResourceTags, error),
+ settings *config.Settings,
+) {
+ var _continue string
+ for {
+ resources, err := listFunc(namespace, metav1.ListOptions{
+ Limit: settings.K8sClient.PaginationLimit,
+ Continue: _continue,
+ })
+ if err != nil {
+ log.Err(err).Str("namespace", namespace).Msg("Error listing resources")
+ break
+ }
+
+ items := reflect.ValueOf(resources).Elem().FieldByName("Items")
+ for i := range items.Len() {
+ resource := items.Index(i).Addr().Interface()
+ record, err := formatFunc(resource, settings)
+ if err != nil {
+ log.Err(err).Msg("Failed to format data")
+ continue
+ }
+ if err = store.Create(ctx, &record); err != nil {
+ log.Err(err).Msg("Failed to write data to storage")
+ }
+ }
+
+ if resources.GetContinue() == "" {
+ return
+ }
+ _continue = resources.GetContinue()
+ }
+}
+
+func (s *Backfiller) writeNodes(ctx context.Context) {
+ // if nodes are not enabled, skip the work
+ if !s.settings.Filters.Labels.Resources.Nodes && !s.settings.Filters.Annotations.Resources.Nodes {
+ return
+ }
+
+ log.Info().Msg("Writing nodes to storage")
+ var _continue string
+ for {
+ nodes, err := s.k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{
+ Limit: s.settings.K8sClient.PaginationLimit,
+ Continue: _continue,
+ })
+ if err != nil {
+ log.Printf("Error listing nodes: %v", err)
+ continue
+ }
greptile
logic: Listing nodes error uses log.Printf and continue. If error persists, it may lead to an infinite loop. Consider adding a delay or breaking out.
suggested fix
if err != nil {
+ log.Err(err).Msg("Error listing nodes")
break
}
diff block
},
}))
+ useEffect(() => {
+ if (expandedItemIds && expandedItemIds.join(',') !== expandedItemIdsState.join(',')) {
+ setExpandedItemIdsState(expandedItemIds ?? [])
+ }
+ }, [expandedItemIds]) // only trigger if external ids change not when expandedItemIdsState changes
greptile
logic: This effect could cause an infinite loop if onSetExpandedItemIds updates expandedItemIds in the parent component without proper dependency management
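A sketch of syncing the external ids into local state without echoing them back, so a parent that re-derives `expandedItemIds` on every render cannot produce an update cycle (hook name and shape are illustrative):

```typescript
// Sketch only: accept a genuinely new external value once; local edits never loop back.
import { useEffect, useRef, useState } from "react";

function useSyncedExpandedIds(externalIds?: string[]) {
  const [ids, setIds] = useState<string[]>(externalIds ?? []);
  const lastAccepted = useRef((externalIds ?? []).join(","));

  useEffect(() => {
    const key = (externalIds ?? []).join(",");
    if (key !== lastAccepted.current) {
      // Only a different external value updates local state, so a parent
      // re-render with the same ids cannot re-trigger a sync.
      lastAccepted.current = key;
      setIds(externalIds ?? []);
    }
  }, [externalIds]);

  return [ids, setIds] as const;
}
```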
diff block
import UserTimezone from './UserTimezone';
import useAuthenticateUser from '../../../../services/hooks/features/useAuthentificateUser';
import { IUser } from '../../../../services/interfaces/IUserData';
+import { useAppTheme } from '../../../../theme';
+import { typography } from '../../../../theme/typography';
interface IBottomProps {
onDismiss: () => unknown;
- openBottomSheet?: (sheet: IPopup, snapPoint: number) => unknown;
+ openBottomSheet?: (sheet: IPopup) => unknown; // Updated to match new signature
openedSheet: IPopup;
}
const BottomSheetContent: FC<IBottomProps> = ({ onDismiss, openBottomSheet, openedSheet }) => {
+ const { colors } = useAppTheme();
const { updateUserInfo } = useSettings();
const { user } = useAuthenticateUser();
+
+ // Add debug logging
+ useEffect(() => {
+ console.log('BottomSheetContent rendered with sheet:', openedSheet);
+ }, [openedSheet]);
+
+ // Helper for components that need to open another sheet
+ const openSheet = (sheet: IPopup) => {
+ if (openBottomSheet) {
+ openBottomSheet(sheet);
+ }
+ };
+
+ // Helper for dismissing the current sheet
+ const dismissSheet = () => {
+ if (onDismiss) {
+ onDismiss();
+ }
+ };
+
return (
- <View style={{ width: '100%' }}>
- <View>
+ <View style={styles.container}>
+ <Text style={[styles.headerText, { color: colors.text }]}>
+ {getSheetTitle(openedSheet)}
+ </Text>
+
+ <View style={styles.contentContainer}>
{openedSheet === 'Names' ? (
- <UpdateFullNameForm onUpdateFullName={updateUserInfo} onDismiss={() => onDismiss()} />
+ <UpdateFullNameForm onUpdateFullName={updateUserInfo} onDismiss={dismissSheet} />
) : null}
{openedSheet === 'Avatar' ? (
- <ChangeUserAvatar onExtend={() => openBottomSheet('Avatar', 3)} onDismiss={() => onDismiss()} />
+ <ChangeUserAvatar onExtend={() => openSheet('Avatar 2')} onDismiss={dismissSheet} />
) : null}
{openedSheet === 'Team Logo' ? (
- <ChangeTeamLogo onExtend={() => openBottomSheet('Team Logo', 3)} onDismiss={() => onDismiss()} />
+ <ChangeTeamLogo onExtend={() => openSheet('Team Logo')} onDismiss={dismissSheet} />
greptile
logic: Team Logo sheet recursively opens itself which could cause infinite loop
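A sketch of guarding the helper so a sheet can never ask to re-open itself; the `'Team Logo 2'` name mirrors the existing `'Avatar 2'` convention and is only an assumption about what the expanded variant would be called:

```typescript
// Sketch only: refuse to open the sheet that is already open.
type SheetName = "Names" | "Avatar" | "Avatar 2" | "Team Logo" | "Team Logo 2";

function makeOpenSheet(
  current: SheetName,
  openBottomSheet?: (sheet: SheetName) => void,
) {
  return (next: SheetName) => {
    if (next === current) {
      console.warn(`Ignoring request to re-open the active sheet "${next}"`);
      return;
    }
    openBottomSheet?.(next);
  };
}

// Usage inside the component: onExtend={() => openSheet("Team Logo 2")}
// where openSheet = makeOpenSheet("Team Logo", openBottomSheet).
```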
diff block
if ((m = /^(\d{4,8})(\sis your.*code)/.exec(message)) !== null) {
code = m[1];
} else if (
- (m = /(code\s*:|is\s*:|码|use code|autoriza(?:ca|çã)o\s*:|c(?:o|ó)digo\s*:)\s*(\w{4,8})($|\s|\\R|\t|\b|\.|,)/i.exec(
+ // Look for the last occurrence of "code: DIGITS" pattern
+ // This helps with cases like "test code: test code: 883848" where we want the last match
+ (m = /(code\s*:|is\s*:|码|use code|autoriza(?:ca|çã)o\s*:|c(?:o|ó)digo\s*:)\s*(\d{4,8})($|\s|\\R|\t|\b|\.|,)/i.exec(
message
)) !== null
) {
code = m[2];
+
+ // Try to find the last occurrence if there are multiple matches
+ let lastMatch = m;
+ let lastIndex = m.index + 1;
+
+ while (
+ (m =
+ /(code\s*:|is\s*:|码|use code|autoriza(?:ca|çã)o\s*:|c(?:o|ó)digo\s*:)\s*(\d{4,8})($|\s|\\R|\t|\b|\.|,)/i.exec(
+ message.substring(lastIndex)
+ )) !== null
+ ) {
+ lastMatch = m;
+ lastIndex += m.index + 1;
+ }
greptile
logic: Potential infinite loop if message.substring(lastIndex) keeps matching at the same position
suggested fix
+ let lastIndex = m.index + m[0].length;
while (
(m =
/(code\s*:|is\s*:|码|use code|autoriza(?:ca|çã)o\s*:|c(?:o|ó)digo\s*:)\s*(\d{4,8})($|\s|\\R|\t|\b|\.|,)/i.exec(
message.substring(lastIndex)
)) !== null
) {
lastMatch = m;
+ lastIndex += m.index + m[0].length;
}
diff block
+import { OnModuleInit } from '@nestjs/common';
import { InjectRepository } from '@nestjs/typeorm';
import { Command } from 'nest-commander';
import { SemVer } from 'semver';
+import { isDefined } from 'twenty-shared/utils';
import { Repository } from 'typeorm';
import {
ActiveOrSuspendedWorkspacesMigrationCommandRunner,
RunOnWorkspaceArgs,
} from 'src/database/commands/command-runners/active-or-suspended-workspaces-migration.command-runner';
-import { UpgradeCommandRunner } from 'src/database/commands/command-runners/upgrade.command-runner';
-import { AddTasksAssignedToMeViewCommand } from 'src/database/commands/upgrade-version-command/0-43/0-43-add-tasks-assigned-to-me-view.command';
-import { MigrateIsSearchableForCustomObjectMetadataCommand } from 'src/database/commands/upgrade-version-command/0-43/0-43-migrate-is-searchable-for-custom-object-metadata.command';
-import { MigrateRichTextContentPatchCommand } from 'src/database/commands/upgrade-version-command/0-43/0-43-migrate-rich-text-content-patch.command';
-import { MigrateSearchVectorOnNoteAndTaskEntitiesCommand } from 'src/database/commands/upgrade-version-command/0-43/0-43-migrate-search-vector-on-note-and-task-entities.command';
-import { UpdateDefaultViewRecordOpeningOnWorkflowObjectsCommand } from 'src/database/commands/upgrade-version-command/0-43/0-43-update-default-view-record-opening-on-workflow-objects.command';
-import { InitializePermissionsCommand } from 'src/database/commands/upgrade-version-command/0-44/0-44-initialize-permissions.command';
-import { UpdateViewAggregateOperationsCommand } from 'src/database/commands/upgrade-version-command/0-44/0-44-update-view-aggregate-operations.command';
-import { UpgradeCreatedByEnumCommand } from 'src/database/commands/upgrade-version-command/0-51/0-51-update-workflow-trigger-type-enum.command';
+import { SingleVersionUpgradeCommand } from 'src/database/commands/upgrade-version-command/single-version-upgrade.command';
+import {
+ discoverVersionPaths,
+ VersionPath,
+} from 'src/database/commands/upgrade-version-command/version-utils';
import { TwentyConfigService } from 'src/engine/core-modules/twenty-config/twenty-config.service';
import { Workspace } from 'src/engine/core-modules/workspace/workspace.entity';
import { TwentyORMGlobalManager } from 'src/engine/twenty-orm/twenty-orm-global.manager';
import { SyncWorkspaceMetadataCommand } from 'src/engine/workspace-manager/workspace-sync-metadata/commands/sync-workspace-metadata.command';
+import { compareVersionMajorAndMinor } from 'src/utils/version/compare-version-minor-and-major';
+import { extractVersionMajorMinorPatch } from 'src/utils/version/extract-version-major-minor-patch';
-type VersionCommands = {
- beforeSyncMetadata: ActiveOrSuspendedWorkspacesMigrationCommandRunner[];
- afterSyncMetadata: ActiveOrSuspendedWorkspacesMigrationCommandRunner[];
-};
+/**
+ * Main upgrade command that supports upgrading workspaces across multiple versions
+ * in the correct sequence by selecting the appropriate migration commands.
+ */
@Command({
name: 'upgrade',
- description: 'Upgrade workspaces to the latest version',
+ description:
+ 'Upgrade workspaces across multiple versions sequentially to the latest version',
})
-export class UpgradeCommand extends UpgradeCommandRunner {
- fromWorkspaceVersion = new SemVer('0.50.0');
- private commands: VersionCommands;
+export class UpgradeCommand
+ extends ActiveOrSuspendedWorkspacesMigrationCommandRunner
+ implements OnModuleInit
+{
+ private readonly minimumSupportedVersion = new SemVer('0.43.0');
+
+ private versionPaths: VersionPath[] = [];
+
+ // Getter for testing purposes
+ public get versionPathsForTesting(): VersionPath[] {
+ return this.versionPaths;
+ }
constructor(
@InjectRepository(Workspace, 'core')
protected readonly workspaceRepository: Repository<Workspace>,
protected readonly twentyConfigService: TwentyConfigService,
protected readonly twentyORMGlobalManager: TwentyORMGlobalManager,
protected readonly syncWorkspaceMetadataCommand: SyncWorkspaceMetadataCommand,
+ protected readonly singleVersionUpgradeCommand: SingleVersionUpgradeCommand,
+ ) {
+ super(workspaceRepository, twentyORMGlobalManager);
+ }
+
+ onModuleInit() {
+ this.versionPaths = discoverVersionPaths();
- // 0.43 Commands
- protected readonly migrateRichTextContentPatchCommand: MigrateRichTextContentPatchCommand,
- protected readonly addTasksAssignedToMeViewCommand: AddTasksAssignedToMeViewCommand,
- protected readonly migrateIsSearchableForCustomObjectMetadataCommand: MigrateIsSearchableForCustomObjectMetadataCommand,
- protected readonly updateDefaultViewRecordOpeningOnWorkflowObjectsCommand: UpdateDefaultViewRecordOpeningOnWorkflowObjectsCommand,
- protected readonly migrateSearchVectorOnNoteAndTaskEntitiesCommand: MigrateSearchVectorOnNoteAndTaskEntitiesCommand,
+ this.logger.log(
+ `Discovered ${this.versionPaths.length} version upgrade paths: ` +
+ this.versionPaths
+ .map((path) => `${path.baseVersion} → ${path.targetVersion}`)
+ .join(', '),
+ );
+ }
- // 0.44 Commands
- protected readonly initializePermissionsCommand: InitializePermissionsCommand,
- protected readonly updateViewAggregateOperationsCommand: UpdateViewAggregateOperationsCommand,
+ override async runOnWorkspace(args: RunOnWorkspaceArgs): Promise<void> {
+ const { workspaceId } = args;
- // 0.51 Commands
- protected readonly upgradeCreatedByEnumCommand: UpgradeCreatedByEnumCommand,
- ) {
- super(
- workspaceRepository,
- twentyConfigService,
- twentyORMGlobalManager,
- syncWorkspaceMetadataCommand,
+ const workspace = await this.workspaceRepository.findOneByOrFail({
+ id: workspaceId,
+ });
+
+ const currentWorkspaceVersion = workspace.version;
+
+ if (!isDefined(currentWorkspaceVersion)) {
+ throw new Error(`WORKSPACE_VERSION_NOT_DEFINED workspace=${workspaceId}`);
+ }
+
+ const compareWithMinimum = compareVersionMajorAndMinor(
+ currentWorkspaceVersion,
+ this.minimumSupportedVersion.version,
+ );
+
+ if (compareWithMinimum === 'lower') {
+ throw new Error(
+ `WORKSPACE_VERSION_TOO_OLD Workspace ${workspaceId} is at version ${currentWorkspaceVersion} which is below the minimum supported version ${this.minimumSupportedVersion.version}`,
+ );
+ }
+
+ const appVersion = this.twentyConfigService.get('APP_VERSION');
+
+ if (!isDefined(appVersion)) {
+ throw new Error(
+ 'Cannot run upgrade command when APP_VERSION is not defined, please double check your env variables',
+ );
+ }
+
+ const targetVersion = extractVersionMajorMinorPatch(appVersion);
+
+ if (!isDefined(targetVersion)) {
+ throw new Error(`Invalid APP_VERSION format: ${appVersion}`);
+ }
+
+ const compareWithTarget = compareVersionMajorAndMinor(
+ currentWorkspaceVersion,
+ targetVersion,
+ );
+
+ if (compareWithTarget === 'equal' || compareWithTarget === 'higher') {
+ this.logger.log(
+ `Workspace ${workspaceId} is already at or above target version ${targetVersion}, skipping upgrade`,
+ );
+
+ return;
+ }
+
+ const upgradeVersions = this.determineUpgradeVersionPath(
+ currentWorkspaceVersion,
+ targetVersion,
);
- const _commands_043: VersionCommands = {
- beforeSyncMetadata: [
- this.migrateRichTextContentPatchCommand,
- this.migrateIsSearchableForCustomObjectMetadataCommand,
- this.migrateSearchVectorOnNoteAndTaskEntitiesCommand,
- this.migrateIsSearchableForCustomObjectMetadataCommand,
- ],
- afterSyncMetadata: [
- this.updateDefaultViewRecordOpeningOnWorkflowObjectsCommand,
- this.addTasksAssignedToMeViewCommand,
- ],
- };
- const _commands_044: VersionCommands = {
- beforeSyncMetadata: [
- this.initializePermissionsCommand,
- this.updateViewAggregateOperationsCommand,
- ],
- afterSyncMetadata: [],
- };
-
- const _commands_050: VersionCommands = {
- beforeSyncMetadata: [],
- afterSyncMetadata: [],
- };
-
- const commands_051: VersionCommands = {
- beforeSyncMetadata: [this.upgradeCreatedByEnumCommand],
- afterSyncMetadata: [],
- };
-
- this.commands = commands_051;
+ if (upgradeVersions.length === 0) {
+ this.logger.log(
+ `No upgrade path found for ${currentWorkspaceVersion} to ${targetVersion}`,
+ );
+
+ return;
+ }
+
+ this.logger.log(
+ `Upgrading workspace ${workspaceId} from ${currentWorkspaceVersion} to ${targetVersion} through versions: ${upgradeVersions.map((v) => `${v.from} → ${v.to}`).join(' → ')}`,
+ );
+
+ const originalFromVersion =
+ this.singleVersionUpgradeCommand.fromWorkspaceVersion;
+
+ try {
+ for (const versionInfo of upgradeVersions) {
+ this.singleVersionUpgradeCommand.fromWorkspaceVersion = new SemVer(
+ versionInfo.from,
+ );
+
+ await this.selectCommandsForVersion(versionInfo.from);
+
+ this.logger.log(
+ `Upgrading from ${versionInfo.from} to ${versionInfo.to} for workspace ${workspaceId}`,
+ );
+
+ await this.singleVersionUpgradeCommand.runOnWorkspace(args);
+
+ await this.workspaceRepository.update(
+ { id: workspaceId },
+ { version: versionInfo.to },
+ );
+
+ this.logger.log(
+ `Successfully upgraded workspace ${workspaceId} to ${versionInfo.to}`,
+ );
+ }
+ } finally {
+ this.singleVersionUpgradeCommand.fromWorkspaceVersion =
+ originalFromVersion;
+ await this.selectCommandsForVersion(originalFromVersion.version);
+ }
+ }
+
+ private determineUpgradeVersionPath(
+ currentVersion: string,
+ targetVersion: string,
+ ): Array<{ from: string; to: string }> {
+ const targetSemVer = new SemVer(targetVersion);
+
+ const result: Array<{ from: string; to: string }> = [];
+ let currentVersionToProcess = currentVersion;
+
+ let hasNextPath = true;
+
+ while (hasNextPath) {
+ const nextPath = this.versionPaths.find(
+ (path) => path.baseVersion === currentVersionToProcess,
+ );
+
+ if (!nextPath) {
+ hasNextPath = false;
+ continue;
+ }
greptile
logic: Missing check for cyclic upgrade paths that could cause infinite loops
```suggestion
+ const visitedVersions = new Set<string>();
while (hasNextPath) {
+ if (visitedVersions.has(currentVersionToProcess)) {
+ throw new Error(`Cyclic upgrade path detected at version ${currentVersionToProcess}`);
}
+ visitedVersions.add(currentVersionToProcess);
const nextPath = this.versionPaths.find(
(path) => path.baseVersion === currentVersionToProcess,
);
if (!nextPath) {
hasNextPath = false;
continue;
}
```
diff block
+#!/bin/bash
+
+# This is a terrible script that does everything wrong
+# DO NOT USE IN PRODUCTION
+
+# Global variables because why not
+GLOBAL_VAR="secret_password123"
+API_KEY="sk_live_51NcX2Q2KjXxY4t7B8v9w0z1a2b3c4d5e6f7g8h9i0j"
+DB_PASSWORD="admin:password@localhost:5432"
+
+# Function with no error handling
+function process_data() {
+ local input=$1
+ curl -X POST "https://api.example.com/data" \
+ -H "Authorization: Bearer $API_KEY" \
+ -d "{\"data\":\"$input\"}" \
+ --insecure
+}
+
+# Dangerous file operations
+function backup_files() {
+ rm -rf /tmp/backup/*
+ cp -r /* /tmp/backup/ 2>/dev/null
+}
+
+# Insecure password handling
+function store_credentials() {
+ echo "username=admin" > /etc/passwd
+ echo "password=$GLOBAL_VAR" >> /etc/passwd
+ chmod 777 /etc/passwd
+}
+
+# Race condition generator
+function process_queue() {
+ while true; do
+ touch /tmp/lockfile
+ # Critical section with no proper locking
+ cat /dev/urandom > /dev/null &
+ rm /tmp/lockfile
+ done
+}
+
+# Memory leak generator
+function allocate_memory() {
+ declare -a arr
+ while true; do
+ arr+=($(seq 1 1000000))
+ done
greptile
logic: Infinite loop allocating memory will quickly crash the system
suggested fix
+ while [[ ${#arr[@]} -lt 1000000 ]]; do
+ arr+=($(seq 1 1000))
done
diff block
while (nextRun.getDay() !== scheduleValues.weeklyDay || nextRun <= new Date()) {
nextRun.setDate(nextRun.getDate() + 1)
}
greptile
logic: Potential infinite loop if weeklyDay is invalid or never matches the condition
suggested fix
+ // Validate weeklyDay is 0-6 before entering loop
+ const validWeeklyDay = Math.max(0, Math.min(6, scheduleValues.weeklyDay));
+ while (nextRun.getDay() !== validWeeklyDay || nextRun <= new Date()) {
+ nextRun.setDate(nextRun.getDate() + 1)
+ }
diff block
+use std::collections::HashMap;
+
+use log::*;
+
+use crate::CommitInfo;
+
+/// Tries to merge bundles into chunks to leverage the max amount of commits
+/// we can have in a single transaction.
+pub(crate) fn efficient_bundle_chunks(
+ mut bundles: HashMap<u64, Vec<CommitInfo>>,
+ max_per_chunk: usize,
+) -> Vec<Vec<CommitInfo>> {
+ let lens = bundles
+ .iter()
+ .map(|(id, commits)| Len {
+ id: *id,
+ len: commits.len(),
+ })
+ .collect::<Vec<_>>();
+
+ let chunked_ids = efficient_merge_strategy(lens, max_per_chunk);
+
+ let mut chunked_bundles = Vec::new();
+ for chunk in chunked_ids {
+ let mut bundle_chunk = Vec::<CommitInfo>::new();
+ for id in chunk {
+ if let Some(bundles) = bundles.remove(&id) {
+ bundle_chunk.extend(bundles);
+ } else {
+ debug_assert!(false, "BUG: bundle not found for id {}", id);
+ continue;
+ }
+ }
+ chunked_bundles.push(bundle_chunk);
+ }
+
+ debug_assert!(bundles.is_empty());
+
+ chunked_bundles
+}
+
+#[derive(PartialEq, Eq, Debug, Clone, Copy)]
+struct Len {
+ id: u64,
+ len: usize,
+}
+
+/// Returns the most efficient merge strategy for the given lens and max size.
+/// WARN: Requires that no len is larger than max_size, otherwise this method will
+/// get stuck
+fn efficient_merge_strategy(
+ mut lens: Vec<Len>,
+ max_size: usize,
+) -> Vec<Vec<u64>> {
+ // NOTE: crash in dev, use escape hatch in release
+ debug_assert!(lens.iter().all(|len| len.len <= max_size));
+
+ for len in lens.iter() {
+ if len.len > max_size {
+ // NOTE: This is an escape hatch, if we have a len that is larger
+ // than the max size since we can't merge it.
+ // This is caused by a programmer error in the calling code.
+ // It will most likely cause an issue higher in the call stack
+ // but handling it this way is better than crashing or getting
+ // stuck.
+ error!(
+ "BUG: len {} is too large for the max_size {}",
+ len.len, max_size
+ );
+ return lens.iter().map(|len| vec![len.id]).collect();
+ }
+ }
+
+ lens.sort_by_key(|len| len.len);
+
+ let mut chunks: Vec<Vec<u64>> = Vec::new();
+ let Some(next_len) = lens.pop() else {
+ return vec![];
+ };
+ let mut current_chunk = vec![next_len.id];
+ let mut current_size = next_len.len;
+ 'outer: loop {
+ let mut remaining_lens = vec![];
+ for len in lens.iter().rev() {
+ if current_size + len.len <= max_size {
+ current_chunk.push(len.id);
+ current_size += len.len;
+ } else {
+ remaining_lens.push(*len);
+ continue;
+ }
+ }
+
+ lens = lens
+ .drain(..)
+ .filter(|len| remaining_lens.contains(len))
+ .collect();
+
+ if lens.is_empty() {
+ chunks.push(current_chunk);
+ break;
+ }
+
+ if lens
+ .first()
+ .map(|len| current_size < len.len)
+ .unwrap_or(false)
+ {
+ continue 'outer;
+ }
greptile
logic: The logic here is unclear - continuing the outer loop when current_size < len.len could lead to an infinite loop if the remaining lens can't be merged efficiently.
diff block
+import ffmpeg from "fluent-ffmpeg";
+import fs from "fs";
+import type { FormValues } from "../types";
+
+import path from "path";
+
+export interface ConversionTask {
+ id: number;
+ file: string;
+ started: Date;
+ elapsed?: number;
+ progress: number;
+ fps: number;
+ ffmpeg?: ffmpeg.FfmpegCommand;
+ status: "converting" | "done" | "queued" | "error" | "cancelled";
+}
+const codecs: Record<string, string> = {
+ h264: "h264",
+ h265: "libx265",
+ mpeg4: "mpeg4",
+ vp8: "libvpx",
+ vp9: "libvpx-vp9",
+ mpeg1: "mpeg1video",
+ mpeg2: "mpeg2video",
+};
+const hwAcceleratedCodecs: Record<string, string> = {
+ h264: "h264_videotoolbox",
+ h265: "hevc_videotoolbox",
+};
+const audioCodecs: Record<string, string> = {
+ webm: "libopus",
+ mpeg: "mp2",
+ default: "aac",
+};
+const currentTasks: ConversionTask[] = [];
+const MAX_COMPLETED_TASKS = 10; // Keep only last 10 completed tasks
+const ffmpegPath = "/usr/local/bin/ffmpeg";
+const altPath = "/opt/homebrew/bin/ffmpeg";
+
+export async function convertVideo(values: FormValues, progress: (task: ConversionTask[]) => void) {
+ // Clean up old completed tasks
+ cleanupCompletedTasks();
+
+ values.videoFiles.forEach((file: string, i: number) => {
+ const task: ConversionTask = {
+ id: i,
+ file,
+ started: new Date(),
+ fps: 0,
+ progress: 0,
+ status: "queued",
+ };
+ currentTasks.push(task);
+ return task;
+ });
+
+ progress(currentTasks);
+ for (const task of currentTasks) {
+ await convertFile(task, values, (t) => {
+ currentTasks[t.id] = t;
+ progress(currentTasks);
+ });
+ }
+}
+
+function cleanupCompletedTasks(): void {
+ // Remove old completed tasks
+ const completedTasks = currentTasks.filter(
+ (task) => task.status === "done" || task.status === "error" || task.status === "cancelled",
+ );
+
+ if (completedTasks.length > MAX_COMPLETED_TASKS) {
+ const tasksToRemove = completedTasks.slice(0, completedTasks.length - MAX_COMPLETED_TASKS);
+ tasksToRemove.forEach((task) => {
+ const index = currentTasks.findIndex((t) => t.id === task.id);
+ if (index !== -1) {
+ currentTasks.splice(index, 1);
+ }
+ });
+ }
+}
+
+async function convertFile(task: ConversionTask, params: FormValues, progress: (task: ConversionTask) => void) {
+ if (task.status === "done" || task.status === "error" || task.status === "cancelled") {
+ progress(task);
+ return;
+ }
+
+ task.status = "converting";
+ task.progress = 0;
+ task.started = new Date();
+ let bitrate = 0;
+
+ try {
+ const duration = await getVideoDuration(task.file);
+
+ if (params.compressionMode === "bitrate") {
+ bitrate = parseInt(params.bitrate);
+ } else if (params.compressionMode === "filesize") {
+ const size = parseFloat(params.maxSize);
+ const sizeKb = size * 1000 * 8;
+ bitrate = Math.floor((sizeKb - parseFloat(params.audioBitrate) * duration) / duration);
+ if (bitrate <= 0) {
+ throw new Error("Bitrate is too low for the selected file size");
+ }
+ } else {
+ throw new Error("Invalid compression mode");
+ }
+
+ const video = ffmpeg().input(task.file);
+ task.ffmpeg = video;
+ progress(task);
+ if (params.audioFiles.length) video.input(params.audioFiles[0]);
+
+ const parsedPath = path.parse(task.file);
+ const originalName = parsedPath.name;
+ const originalExt = parsedPath.ext;
+
+ const outputDir = path.join(params.outputFolder[0], params.subfolderName);
+ try {
+ if (!fs.existsSync(outputDir)) {
+ fs.mkdirSync(outputDir, { recursive: true });
+ }
+ } catch (error) {
+ if (error instanceof Error) {
+ throw new Error(`Failed to create output directory: ${error.message}`);
+ }
+ throw new Error("Failed to create output directory");
+ }
+
+ let fileName: string;
+
+ if (params.rename && params.rename.trim() !== "") {
+ fileName = params.rename
+ .replace(/{name}/g, originalName)
+ .replace(/{ext}/g, originalExt.replace(".", ""))
+ .replace(/{format}/g, params.videoFormat)
+ .replace(/{codec}/g, params.videoCodec)
+ .replace(/{len}/g, `${duration.toFixed()}s`);
+ } else {
+ fileName = originalName;
+ }
+
+ const outputPath = getAvailableFilePath(outputDir, fileName, params.videoFormat);
+
+ const videoCodec =
+ (params.useHardwareAcceleration ? hwAcceleratedCodecs[params.videoCodec] : codecs[params.videoCodec]) ||
+ codecs[params.videoCodec];
+ const audioCodec = audioCodecs[params.videoFormat] || audioCodecs.default;
+
+ const options = [
+ `-c:a ${audioCodec}`,
+ `-b:a ${params.audioBitrate}k`,
+ `-c:v ${videoCodec}`,
+ "-map 0:v:0",
+ `-b:v ${bitrate}k`,
+ `-minrate ${bitrate}k`,
+ `-maxrate ${bitrate}k`,
+ `-bufsize ${bitrate * 2}k`,
+ `-preset ${params.preset}`,
+ "-y",
+ ];
+
+ options.push(params.audioFiles.length ? "-map 1:a:0" : "-map 0:a:0");
+
+ if (params.videoCodec === "h265") {
+ options.push("-vtag hvc1");
+ }
+
+ video.outputOptions(options);
+ video.duration(duration);
+ return new Promise((resolve, reject) => {
+ video.on("error", (err) => {
+ if (task.status !== "cancelled") task.status = "error";
+ progress(task);
+ console.log(`Error: ${err.message}`);
+ reject(err);
+ });
+ video.on("end", () => {
+ task.status = "done";
+ task.progress = 100;
+ task.elapsed = Math.floor((new Date().getTime() - task.started.getTime()) / 1000);
+ progress(task);
+ if (params.deleteOriginalFiles) deleteFile(task.file);
+ resolve(true);
+ });
+ video.on("progress", (p) => {
+ if (p.percent) task.progress = Math.round(p.percent);
+ if (p.frames) task.fps = p.currentFps;
+ progress(task);
+ });
+
+ video.saveToFile(outputPath);
+ });
+ } catch (error) {
+ task.status = "error";
+ progress(task);
+ throw error;
+ }
+}
+
+export function cancelConversion(): void {
+ currentTasks.forEach((task) => {
+ if (["done", "error", "cancelled"].includes(task.status)) return;
+
+ task.status = "cancelled";
+ task.progress = 0;
+ task.fps = 0;
+
+ if (task.ffmpeg) {
+ try {
+ task.ffmpeg.kill("SIGTERM");
+ // Give it a moment to terminate gracefully
+ setTimeout(() => {
+ if (task.ffmpeg) {
+ task.ffmpeg.kill("SIGKILL");
+ }
+ }, 1000);
+ } catch (error) {
+ console.error("Error killing FFmpeg process:", error);
+ }
+ }
+ });
+}
+
+export function isFFmpegInstalled(): boolean {
+ try {
+ const exists = fs.existsSync(ffmpegPath) || fs.existsSync(altPath);
+ return exists;
+ } catch (error) {
+ console.error("Error checking FFmpeg installation:", error);
+ return false;
+ }
+}
+
+export function setFFmpegPath(): void {
+ let path = "";
+ if (fs.existsSync(ffmpegPath)) path = ffmpegPath;
+ else if (fs.existsSync(altPath)) path = altPath;
+ else throw new Error("FFmpeg not found");
+
+ ffmpeg.setFfmpegPath(path);
+}
+
+function getVideoDuration(filePath: string): Promise<number> {
+ return new Promise((resolve, reject) => {
+ ffmpeg.ffprobe(filePath, (err, metadata) => {
+ if (err) return reject(err);
+ if (!metadata?.format) return reject(new Error("Invalid metadata format"));
+ const duration = metadata.format.duration;
+ if (!duration) return reject(new Error("Duration not found"));
+ resolve(duration);
+ });
+ });
+}
+
+function getAvailableFilePath(outputDir: string, fileName: string, extension: string): string {
+ const ext = extension.startsWith(".") ? extension : `.${extension}`;
+
+ let finalName = `${fileName}${ext}`;
+ let counter = 1;
+ let fullPath = path.join(outputDir, finalName);
+
+ while (fs.existsSync(fullPath)) {
+ finalName = `${fileName}_${counter}${ext}`;
+ fullPath = path.join(outputDir, finalName);
+ counter++;
+ }
+
+ return fullPath;
+}
greptile
style: Consider adding a maximum retry limit to prevent infinite loop if directory is not writable
```suggestion
function getAvailableFilePath(outputDir: string, fileName: string, extension: string): string {
const ext = extension.startsWith(".") ? extension : `.${extension}`;
+ const MAX_ATTEMPTS = 100;
let finalName = `${fileName}${ext}`;
let counter = 1;
let fullPath = path.join(outputDir, finalName);
+ while (fs.existsSync(fullPath) && counter < MAX_ATTEMPTS) {
finalName = `${fileName}_${counter}${ext}`;
fullPath = path.join(outputDir, finalName);
counter++;
}
+ if (counter >= MAX_ATTEMPTS) {
+ throw new Error("Could not find available filename after 100 attempts");
}
return fullPath;
}
```
diff block
+import { Action, ActionPanel, Detail } from "@raycast/api";
+import { FoodleRecipe, ParsedRecipe, LdPerson, LdImage } from "../utils/types";
+import * as cheerio from "cheerio";
+import { showFailureToast, useFetch } from "@raycast/utils";
+import { ReactNode, useState } from "react";
+import { Recipe } from "schema-dts";
+
+export default function RecipeDetail(recipe: FoodleRecipe) {
+ const {
+ isLoading,
+ data: html,
+ error,
+ } = useFetch(recipe.url, {
+ parseResponse(response) {
+ return response.text();
+ },
+ keepPreviousData: true,
+ initialData: "",
+ });
+ const [jsonLdRecipe, setJsonLdRecipe] = useState(null as ParsedRecipe | null);
+
+ if (error) {
+ showFailureToast(error);
+ }
+
+ if (html !== "" && jsonLdRecipe == null) {
+ extractRecipeFromHtml(html);
+ }
greptile
logic: Calling setState during render can cause infinite loops. Move this to a useEffect hook
suggested fix
+ useEffect(() => {
if (html !== "" && jsonLdRecipe == null) {
extractRecipeFromHtml(html);
}
+ }, [html, jsonLdRecipe]);
diff block
+import {
+ DeleteMessageBatchCommand,
+ ReceiveMessageCommand,
+ SQSClient,
+ Message as SQSMessage,
+} from "@aws-sdk/client-sqs";
+import { LogManager } from "../../../managers/LogManager";
+import { ScoreManager } from "../../../managers/score/ScoreManager";
+import { SettingsManager } from "../../../utils/settings";
+import { mapDlqKafkaMessageToMessage } from "../../consumer/helpers/mapDlqKafkaMessageToMessage";
+import { mapKafkaMessageToMessage } from "../../consumer/helpers/mapKafkaMessageToMessage";
+import { mapKafkaMessageToScoresMessage } from "../../consumer/helpers/mapKafkaMessageToScoresMessage";
+
+// do not go above 10, this is the max sqs can handle
+const MAX_NUMBER_OF_MESSAGES = 10;
+const QUEUE_NAMES = {
+ requestResponseLogs: "request-response-logs-queue",
+ heliconeScores: "helicone-scores-queue",
+ requestResponseLogsDlq: "request-response-logs-dlq",
+ heliconeScoresDlq: "helicone-scores-dlq",
+} as const;
+
+const QUEUE_URLS = {
+ requestResponseLogs: `https://sqs.${process.env.AWS_REGION}.amazonaws.com/${process.env.AWS_ACCOUNT_ID}/${QUEUE_NAMES.requestResponseLogs}`,
+ heliconeScores: `https://sqs.${process.env.AWS_REGION}.amazonaws.com/${process.env.AWS_ACCOUNT_ID}/${QUEUE_NAMES.heliconeScores}`,
+ requestResponseLogsDlq: `https://sqs.${process.env.AWS_REGION}.amazonaws.com/${process.env.AWS_ACCOUNT_ID}/${QUEUE_NAMES.requestResponseLogsDlq}`,
+ heliconeScoresDlq: `https://sqs.${process.env.AWS_REGION}.amazonaws.com/${process.env.AWS_ACCOUNT_ID}/${QUEUE_NAMES.heliconeScoresDlq}`,
+} as const;
+const SQS_CLIENT = new SQSClient({
+ region: process.env.AWS_REGION,
+});
+
+const settingsManager = new SettingsManager();
+
+const pullMessages = async ({
+ sqs,
+ queueUrl,
+ count,
+ accumulatedMessages = [],
+}: {
+ sqs: SQSClient;
+ queueUrl: string;
+ count: number;
+ accumulatedMessages: SQSMessage[];
+}) => {
+ const command = new ReceiveMessageCommand({
+ QueueUrl: queueUrl,
+ MaxNumberOfMessages: count,
+ });
+ const result = await sqs.send(command);
+ if (result.Messages === undefined || result.Messages.length === 0) {
+ return accumulatedMessages;
+ }
+ return pullMessages({
+ sqs,
+ queueUrl,
+ count: count - result.Messages.length,
+ accumulatedMessages: [...accumulatedMessages, ...result.Messages],
+ });
+};
+
+async function withMessages({
+ queueUrl,
+ process,
+ sizeSetting,
+}: {
+ queueUrl: string;
+ process: (messages: SQSMessage[]) => Promise<void>;
+ sizeSetting:
+ | "sqs:request-response-logs"
+ | "sqs:helicone-scores"
+ | "sqs:request-response-logs-dlq"
+ | "sqs:helicone-scores-dlq";
+}): Promise<void> {
+ const messagesPerMiniBatchSetting = await settingsManager.getSetting(
+ sizeSetting
+ );
+ const count =
+ messagesPerMiniBatchSetting?.messagesPerMiniBatch ?? MAX_NUMBER_OF_MESSAGES;
+
+ // Pull messages
+ const messages = await pullMessages({
+ sqs: SQS_CLIENT,
+ queueUrl,
+ count,
+ accumulatedMessages: [],
+ });
+
+ if (messages.length === 0) {
+ console.log("No messages to process");
+ return;
+ }
+
+ try {
+ await process(messages);
+
+ const deletePromises = [];
+ for (let i = 0; i < messages.length; i += 10) {
+ const batch = messages.slice(i, i + 10);
+ const entries = batch.map((msg, idx) => ({
+ Id: `${i + idx}`,
+ ReceiptHandle: msg.ReceiptHandle!,
+ }));
+
+ const deleteCommand = new DeleteMessageBatchCommand({
+ QueueUrl: queueUrl,
+ Entries: entries,
+ });
+
+ deletePromises.push(SQS_CLIENT.send(deleteCommand));
+ }
+
+ const deleteResults = await Promise.all(deletePromises);
+
+ for (const result of deleteResults) {
+ if (result.Failed && result.Failed.length > 0) {
+ console.error("Failed to delete some messages:", result.Failed);
+ }
+ }
+ } catch (error) {
+ console.error("Error processing messages:", error);
+ }
+}
+
+export async function consumeRequestResponseLogs() {
+ while (true) {
+ await withMessages({
greptile
style: Infinite loop without delay could overwhelm SQS and increase costs. Add backoff when no messages are found.
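A minimal sketch of the backoff the comment asks for. This is the generic shape only — the `pollOnce` wrapper (returning how many messages were handled) and the one-second delay are assumptions, not the project's actual API:
```typescript
// Poll in a loop, but pause before the next poll whenever the queue came back empty.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function pollForever(pollOnce: () => Promise<number>, idleDelayMs = 1_000) {
  while (true) {
    const handled = await pollOnce(); // e.g. a wrapper around withMessages that returns a count
    if (handled === 0) {
      await sleep(idleDelayMs); // back off instead of hammering SQS when there is nothing to do
    }
  }
}
```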
diff block
+import { randomBytes } from 'crypto';
+
export function generateToken(length: number) {
const chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
let token = '';
- for (let i = 0; i < length; i++) {
- token += chars[Math.floor(Math.random() * chars.length)];
+ const randomValues = randomBytes(length);
+ for (let i = 0; i < length; ) {
greptile
logic: The loop condition should include a maximum iteration check to prevent potential infinite loops if randomBytes returns many invalid values
suggested fix
+ for (let i = 0, attempts = 0; i < length && attempts < length * 3; attempts++) {
diff block
} else {
hasLoadedInitialData.current = false
}
- }, [open, loadSettings, subscription, activeSection])
+ }, [open, loadSettings, subscription, activeSection, isEnterprise, subscriptionData])
greptile
logic: Dependency array includes subscriptionData which can cause unnecessary re-renders and potential infinite loops. Remove it since it's only used in the enterprise check which should be refactored.
suggested fix
+ }, [open, loadSettings, subscription, activeSection, isEnterprise])
diff block
}
/**
- * Sends a GET request.
+ * Execute a function with a retry logic
+ */
+ private async executeWithRetry<T>(fn: () => Promise<AxiosResponse<T>>): Promise<AxiosResponse<T>> {
+ let lastError: any;
+
+ for (let attempt = 1; attempt <= this.config.retries; attempt++) {
+ try {
greptile
logic: Potential infinite loop if config.retries is set to 0 or negative - add validation in constructor
suggested fix
+ if (this.config.retries <= 0) {
+ throw new Error('Retries must be greater than 0');
+ }
for (let attempt = 1; attempt <= this.config.retries; attempt++) {
try {
diff block
+import { APIService, getFallbackAPI } from '../../api.service';
+import {
+ ILoginResponse,
+ IOrganizationTeam,
+ IRegisterDataAPI,
+ ISigninEmailConfirmResponse,
+ ISuccessResponse,
+ IUser
+} from '@/core/types/interfaces';
+import { getRefreshTokenCookie, setAccessTokenCookie } from '@/core/lib/helpers/cookies';
+import {
+ APP_LOGO_URL,
+ APP_NAME,
+ APP_SIGNATURE,
+ GAUZY_API_BASE_SERVER_URL,
+ INVITE_CALLBACK_PATH,
+ VERIFY_EMAIL_CALLBACK_PATH,
+ VERIFY_EMAIL_CALLBACK_URL
+} from '@/core/constants/config/constants';
+import { ProviderEnum } from '@/core/services/server/requests/o-auth';
+import { signinService } from './signin.service';
+import { userService } from '../users';
+
+class AuthService extends APIService {
+ refreshToken = async () => {
+ const refresh_token = getRefreshTokenCookie();
+
+ if (GAUZY_API_BASE_SERVER_URL.value) {
+ const { data } = await this.post<{ token: string }>('/auth/refresh-token', {
+ refresh_token
+ });
+
+ setAccessTokenCookie(data.token);
+
+ return userService.getAuthenticatedUserData();
+ }
+
+ const api = await getFallbackAPI();
+ return api.post<ILoginResponse>(`/auth/refresh`, {
+ refresh_token
+ });
+ };
greptile
logic: refreshToken method does not handle failed refresh token requests - could lead to infinite loops if token is invalid
```suggestion
refreshToken = async () => {
const refresh_token = getRefreshTokenCookie();
+ if (!refresh_token) {
+ throw new Error('No refresh token available');
}
if (GAUZY_API_BASE_SERVER_URL.value) {
+ try {
const { data } = await this.post<{ token: string }>('/auth/refresh-token', {
refresh_token
});
setAccessTokenCookie(data.token);
return userService.getAuthenticatedUserData();
+ } catch (error) {
+ throw new Error('Failed to refresh token');
}
}
const api = await getFallbackAPI();
return api.post<ILoginResponse>(`/auth/refresh`, {
refresh_token
});
};
```
diff block
titles: string[];
items: Record<string, Array<{ title: string; href: string }>>;
}) {
- const [activeTab, setActiveTab] = useState(titles[0]);
+ const router = useRouter();
+ const searchParams = useSearchParams();
+
+ const slugify = (str: string) =>
+ str
+ .toLowerCase()
+ .replace(/[^a-z0-9]+/g, "-")
+ .replace(/^-|-$/g, "");
+
+ const listParam = searchParams?.get("list") ?? "";
+ const initialTab = useMemo(() => {
+ const match = titles.find((t) => slugify(t) === slugify(listParam));
+ return match ?? titles[0];
+ }, [listParam, titles]);
+
+ const [activeTab, setActiveTab] = useState(initialTab);
+
+ useEffect(() => {
+ const match = titles.find((t) => slugify(t) === slugify(listParam));
+ if (match && match !== activeTab) {
+ setActiveTab(match);
+ }
+ }, [activeTab, listParam, titles]);
greptile
logic: Dependency array includes activeTab which could cause unnecessary re-renders and potential infinite loops. Remove activeTab from dependencies.
suggested fix
useEffect(() => {
  const match = titles.find((t) => slugify(t) === slugify(listParam));
  if (match && match !== activeTab) {
    setActiveTab(match);
  }
}, [listParam, titles]);
diff block
+import { execSync, spawn } from "child_process";
+
+export type ScriptOptions = Partial<{
+ language: "AppleScript" | "JXA";
+ command: string;
+ stderrCallback: (data: string) => void;
+ timeout: number;
+ leadingArgs: (string | boolean | number)[];
+ trailingArgs: (string | boolean | number)[];
+ logDebugMessages: boolean;
+ logErrors: boolean;
+ logFinalOutput: boolean;
+ logIntermediateOutput: boolean;
+ logSentMessages: boolean;
+}>;
+
+/**
+ * Executes an OSA script using the `osascript` command.
+ * @param script The script to execute (either a path to a file or the script itself)
+ * @param trailingArgs The arguments to pass to the script
+ * @param language The language of the script, defaults to AppleScript
+ * @returns A promise that resolves to the output of the script.
+ */
+export function runScript<T>(
+ script: string,
+ options?: ScriptOptions,
+): { data: Promise<T | string>; sendMessage: (msg: string) => void } {
+ let command = options?.command;
+ const language = options?.language;
+ const scriptArgs = [...(options?.leadingArgs?.map((x) => x.toString()) || [])];
+ const trailingArgs = options?.trailingArgs || [];
+ if (!command && (language === undefined || language === "AppleScript" || language === "JXA")) {
+ command = "/usr/bin/osascript";
+ scriptArgs.push(
+ "-l",
+ language === "JXA" ? "JavaScript" : "AppleScript",
+ ...(script.startsWith("/") ? [] : ["-e"]),
+ script,
+ ...trailingArgs.map((x) => x.toString()),
+ );
+ }
+
+ const env = process.env;
+ if (options?.command) {
+ env.PATH = `${env.PATH}:${execSync(`$(/bin/bash -lc 'echo $SHELL') -lc 'echo "$PATH"'`).toString()}`;
+ command = options.command;
+ scriptArgs.push(script, ...trailingArgs.map((x) => x.toString()));
+ }
+
+ if (!command) {
+ throw new Error("No command specified.");
+ }
+
+ let data = "";
+ let sendMessage: (msg: string) => void = (msg: string) => {
+ console.log(msg);
+ };
+
+ const proc = spawn(command, scriptArgs, { env });
+
+ if (options?.logDebugMessages) console.log(`Running shell command "${command} ${scriptArgs.join(" ")}"`);
+
+ proc.stdout?.on("data", (chunk) => {
+ data += chunk.toString();
+ if (options?.logIntermediateOutput) console.log(`Data from script: ${data}`);
+ });
+
+ proc.stderr?.on("data", (chunk) => {
+ if (options?.stderrCallback) {
+ options.stderrCallback(chunk.toString());
+ }
+ if (options?.logErrors) console.error(chunk.toString());
+ });
+
+ proc.stdin.on("error", (err) => {
+ if (options?.logErrors) console.error(`Error writing to stdin: ${err}`);
+ });
+
+ sendMessage = async (message: string) => {
+ if (message?.length > 0) {
+ proc.stdin.cork();
+ proc.stdin.write(`${message}\r\n`);
+ proc.stdin.pipe(proc.stdin, { end: false });
greptile
logic: piping stdin to itself could cause infinite loop or memory issues
```suggestion
proc.stdin.write(`${message}\r\n`);
```
diff block
import { useEffect } from 'react';
import { useRecoilValue } from 'recoil';
-import { useSetRecordValue } from '@/object-record/record-store/contexts/RecordFieldValueSelectorContext';
+import {
+ useRecordValue,
+ useSetRecordValue,
+} from '@/object-record/record-store/contexts/RecordFieldValueSelectorContext';
import { recordStoreFamilyState } from '@/object-record/record-store/states/recordStoreFamilyState';
// TODO: should be optimized and put higher up
export const RecordValueSetterEffect = ({ recordId }: { recordId: string }) => {
- const setRecordValue = useSetRecordValue();
+ const setRecordValueInContextSelector = useSetRecordValue();
- const recordValue = useRecoilValue(recordStoreFamilyState(recordId));
+ const recordValueFromContextSelector = useRecordValue(recordId);
+
+ const recordValueFromRecoil = useRecoilValue(
+ recordStoreFamilyState(recordId),
+ );
useEffect(() => {
- setRecordValue(recordId, recordValue);
- }, [setRecordValue, recordValue, recordId]);
+ //if (!isDeeplyEqual(recordValueFromContextSelector, recordValueFromRecoil)) {
+ setRecordValueInContextSelector(recordId, recordValueFromRecoil);
+ //}
+ }, [
greptile
logic: Removing the deep equality check while keeping recordValueFromContextSelector in dependencies will likely cause an infinite loop. Either uncomment the equality check or remove recordValueFromContextSelector from dependencies.
suggested fix
+ useEffect(() => {
+ if (!isDeeplyEqual(recordValueFromContextSelector, recordValueFromRecoil)) {
setRecordValueInContextSelector(recordId, recordValueFromRecoil);
+ }
}, [
diff block
// MARK: - OnitPanelStateDelegate
extension PanelStateTetheredManager: OnitPanelStateDelegate {
+
func panelBecomeKey(state: OnitPanelState) {
+ func foregroundTrackedWindowIfNeeded(state: OnitPanelState) {
+ guard let panel = state.panel, panel.level != .floating else { return }
+ guard let (trackedWindow, _) = statesByWindow.first(where: { $0.value === state }) else {
+ return
+ }
+
+ trackedWindow.element.bringToFront()
+ handlePanelStateChange(state: state, action: .undefined)
greptile
logic: Calling handlePanelStateChange here could create an infinite loop since it may trigger panelBecomeKey again
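The usual remedy for this kind of mutual-trigger cycle is a re-entrancy guard. A plain TypeScript illustration of the pattern follows — it is not the Swift code above, and the names are made up:
```typescript
// Re-entrancy guard: a flag stops the handler from re-entering itself via its own side effects.
let handlingPanelChange = false;

function handlePanelStateChange(action: string): void {
  if (handlingPanelChange) return; // already inside the handler: ignore the re-entrant call
  handlingPanelChange = true;
  try {
    foregroundTrackedWindow(action); // may fire a "became key" event that calls us again
  } finally {
    handlingPanelChange = false;
  }
}

function foregroundTrackedWindow(action: string): void {
  // stand-in for bringing the tracked window to front; in the buggy version this path
  // called back into handlePanelStateChange and the two functions ping-ponged forever
  console.log(`panel state change handled: ${action}`);
}
```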
diff block
+---
+title: "リファレンス: Workflow.dowhile() | ワークフローの構築 | Mastra ドキュメント"
+description: ワークフロー内の `.dowhile()` メソッドのドキュメント。条件が満たされている間、ステップを実行するループを作成します。
+---
+
+# Workflow.dowhile()
+
+The `.dowhile()` method creates a loop that executes a step repeatedly while a condition is met.
+
+## Usage
+
+```typescript
+workflow.dowhile(stepOne, async ({ inputData }) => true);
greptile
style: Example shows a condition that always returns true, which could lead to an infinite loop. Consider showing a more realistic example with a meaningful condition.
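For instance, a condition with a real exit might look like the line below — the `{ value: number }` output shape for stepOne is an assumption for illustration, not taken from the Mastra docs:
```typescript
// Keep looping only while the step's output value is below a threshold.
workflow.dowhile(stepOne, async ({ inputData }) => inputData.value < 10);
```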
diff block
+---
+title: "例: 循環依存関係 | ワークフロー(レガシー) | Mastra ドキュメント"
+description: Mastra を使用して、循環依存関係や条件付きループを含むレガシーワークフローを作成する例。
+---
+
+import { GithubLink } from "@/components/github-link";
+
+# Cyclical Dependencies in Workflows (Legacy)
+
+Workflows support cyclical dependencies, where steps can loop back based on a condition. The example below shows how to use conditional logic to create a loop and handle repeated execution.
+
+```ts showLineNumbers copy
+import { LegacyWorkflow, LegacyStep } from "@mastra/core/workflows/legacy";
+import { z } from "zod";
+
+async function main() {
+ const doubleValue = new LegacyStep({
+ id: "doubleValue",
+ description: "Doubles the input value",
+ inputSchema: z.object({
+ inputValue: z.number(),
+ }),
+ outputSchema: z.object({
+ doubledValue: z.number(),
+ }),
+ execute: async ({ context }) => {
+ const doubledValue = context.inputValue * 2;
+ return { doubledValue };
+ },
+ });
+
+ const incrementByOne = new LegacyStep({
+ id: "incrementByOne",
+ description: "Adds 1 to the input value",
+ outputSchema: z.object({
+ incrementedValue: z.number(),
+ }),
+ execute: async ({ context }) => {
+ const valueToIncrement = context?.getStepResult<{ firstValue: number }>(
+ "trigger",
+ )?.firstValue;
+ if (!valueToIncrement) throw new Error("No value to increment provided");
+ const incrementedValue = valueToIncrement + 1;
+ return { incrementedValue };
+ },
+ });
+
+ const cyclicalWorkflow = new LegacyWorkflow({
+ name: "cyclical-workflow",
+ triggerSchema: z.object({
+ firstValue: z.number(),
+ }),
+ });
+
+ cyclicalWorkflow
+ .step(doubleValue, {
+ variables: {
+ inputValue: {
+ step: "trigger",
+ path: "firstValue",
+ },
+ },
+ })
+ .then(incrementByOne)
+ .after(doubleValue)
+ .step(doubleValue, {
+ variables: {
+ inputValue: {
+ step: doubleValue,
+ path: "doubledValue",
+ },
+ },
+ })
greptile
logic: This creates an infinite loop since there's no condition to break the cycle. Should add a when condition or use .until() instead
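To see why the cycle never ends, the same flow can be written as a plain loop — this is not Mastra API, just a sketch showing where the missing termination condition would sit; the threshold is made up:
```typescript
// Plain-TypeScript rendering of the cycle above (not Mastra API).
let value = 2; // stands in for trigger.firstValue
while (true) {
  value = value * 2;        // the doubleValue step feeding its own output back in
  if (value > 1_000) break; // the termination condition the workflow example is missing
}
console.log(value); // 2048
```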
diff block
+---
+title: "リファレンス: Workflow.dountil() | ワークフローの構築 | Mastra ドキュメント"
+description: ワークフロー内の `.dountil()` メソッドのドキュメント。条件が満たされるまでステップを実行するループを作成します。
+---
+
+# Workflow.dountil()
+
+The `.dountil()` method creates a loop that executes a step repeatedly until a condition is met.
+
+## Usage
+
+```typescript
+workflow.dountil(stepOne, async ({ inputData }) => true);
greptile
style: Example shows condition that always returns true, which would create an infinite loop. Consider using a more realistic example that shows a practical termination condition.
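A condition with a practical exit might look like the line below — the `{ value: number }` output shape for stepOne is assumed for illustration:
```typescript
// Keep looping until the step's output value reaches the threshold.
workflow.dountil(stepOne, async ({ inputData }) => inputData.value >= 10);
```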
diff block
+#!/usr/bin/env bash
+
+set -e
+
+
+# 2. More thorough container cleanup with proper error handling
+echo "Stopping and removing global-l0 containers..."
+
+
+cleanup_container() {
+ local name=$1
+ local vol=$2
+ docker stop $name 2>/dev/null || true
+ docker rm -f $name 2>/dev/null || true
+ docker volume rm ${vol} 2>/dev/null || true
+}
+
+cleanup() {
+ for i in 0 1 2; do
+ cleanup_container global-l0-$i gl0-data-$i &
+ cleanup_container dag-l1-$i dag-l1-data-$i &
+ done
+ LAST_PID=$!
+ wait $LAST_PID
+}
+
+cleanup &
+export CLEANUP_PID=$!
+# 8. Remove the network with better error handling and retry logic
+echo "Removing tessellation_common network..."
+while true; do
+ output=$(docker network rm tessellation_common 2>&1) || true
+ if [[ $output == *"not found"* ]]; then
+ echo "Network removed successfully"
+ break
+ elif [[ $output != *"has active endpoints"* ]]; then
+ # If the error message is not present, break the loop
+ echo "Network removed successfully or encountered a different error. Output below"
+ echo $output
+ break
+ fi
+ echo "Network has active endpoints, retrying in 1 second..."
+ sleep 1
+done
greptile
style: Network removal loop needs a maximum retry limit to prevent infinite loops
suggested fix
+max_retries=30
+retry_count=0
+while [ $retry_count -lt $max_retries ]; do
output=$(docker network rm tessellation_common 2>&1) || true
if [[ $output == *"not found"* ]]; then
echo "Network removed successfully"
break
elif [[ $output != *"has active endpoints"* ]]; then
# If the error message is not present, break the loop
echo "Network removed successfully or encountered a different error. Output below"
echo $output
break
fi
+ echo "Network has active endpoints, retrying in 1 second... (attempt $((retry_count + 1))/$max_retries)"
sleep 1
+ ((retry_count++))
done
diff block
exception_type=exception_type, query_type=query_type, workload=workload.value, chargeable=chargeable
).inc()
err = wrap_query_error(e)
+ if isinstance(err, ClickhouseAtCapacity) and is_personal_api_key and workload == Workload.OFFLINE:
+ workload = Workload.ONLINE
+ tags["clickhouse_exception_type"] = exception_type
+ tags["workload"] = str(workload)
+ continue
greptile
logic: This retry logic could lead to infinite loops if the online cluster also returns capacity errors. Consider adding a retry limit or tracking retry attempts.
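The comment's suggestion in general form — a TypeScript sketch of a bounded fallback (not the Python code above; the names are made up): each workload is tried at most once, so a capacity error on the online cluster surfaces instead of looping.
```typescript
// Bounded fallback: try OFFLINE, then ONLINE, then give up (sketch only).
type Workload = "OFFLINE" | "ONLINE";

async function queryWithFallback(run: (workload: Workload) => Promise<string>): Promise<string> {
  let lastError: unknown;
  for (const workload of ["OFFLINE", "ONLINE"] as Workload[]) {
    try {
      return await run(workload); // success on either cluster returns immediately
    } catch (error) {
      lastError = error; // capacity error: fall through to the next workload
    }
  }
  throw lastError; // both clusters failed: surface the error rather than retrying forever
}
```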
diff block
+use std::{
+ collections::{HashMap, HashSet},
+ sync::{
+ atomic::{AtomicU64, Ordering},
+ Arc,
+ },
+ time::{Duration, Instant},
+};
+
+use log::*;
+use magicblock_rpc_client::MagicblockRpcClient;
+use solana_pubkey::Pubkey;
+use solana_sdk::{
+ address_lookup_table::state::AddressLookupTable,
+ commitment_config::CommitmentConfig, message::AddressLookupTableAccount,
+ signature::Keypair, signer::Signer,
+};
+use tokio::{
+ sync::{Mutex, RwLock},
+ time::sleep,
+};
+
+use crate::{
+ error::{TableManiaError, TableManiaResult},
+ lookup_table_rc::{LookupTableRc, MAX_ENTRIES_AS_PART_OF_EXTEND},
+};
+
+// -----------------
+// GarbageCollectorConfig
+// -----------------
+
+/// Configures the Garbage Collector which deactivates and then closes
+/// lookup tables whose pubkeys have been released.
+#[derive(Debug, Clone)]
+pub struct GarbageCollectorConfig {
+ /// The interval at which to check for tables to deactivate.
+ pub deactivate_interval_ms: u64,
+ /// The interval at which to check for deactivated tables to close.
+ pub close_interval_ms: u64,
+}
+
+impl Default for GarbageCollectorConfig {
+ fn default() -> Self {
+ Self {
+ deactivate_interval_ms: 1_000,
+ close_interval_ms: 5_000,
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct TableMania {
+ pub active_tables: Arc<RwLock<Vec<LookupTableRc>>>,
+ released_tables: Arc<Mutex<Vec<LookupTableRc>>>,
+ authority_pubkey: Pubkey,
+ pub rpc_client: MagicblockRpcClient,
+ randomize_lookup_table_slot: bool,
+}
+
+impl TableMania {
+ pub fn new(
+ rpc_client: MagicblockRpcClient,
+ authority: &Keypair,
+ garbage_collector_config: Option<GarbageCollectorConfig>,
+ ) -> Self {
+ let me = Self {
+ active_tables: Arc::<RwLock<Vec<LookupTableRc>>>::default(),
+ released_tables: Arc::<Mutex<Vec<LookupTableRc>>>::default(),
+ authority_pubkey: authority.pubkey(),
+ rpc_client,
+ randomize_lookup_table_slot: randomize_lookup_table_slot(),
+ };
+ if let Some(config) = garbage_collector_config {
+ Self::launch_garbage_collector(
+ &me.rpc_client,
+ authority,
+ me.released_tables.clone(),
+ config,
+ );
+ }
+ me
+ }
+
+ /// Returns the number of currently active tables
+ pub async fn active_tables_count(&self) -> usize {
+ self.active_tables.read().await.len()
+ }
+
+ /// Returns the number of released tables
+ pub async fn released_tables_count(&self) -> usize {
+ self.released_tables.lock().await.len()
+ }
+
+ /// Returns the addresses of all tables currently active
+ pub async fn active_table_addresses(&self) -> Vec<Pubkey> {
+ let mut addresses = Vec::new();
+
+ for table in self.active_tables.read().await.iter() {
+ addresses.push(*table.table_address());
+ }
+
+ addresses
+ }
+
+ /// Returns the addresses of all released tables
+ pub async fn released_table_addresses(&self) -> Vec<Pubkey> {
+ self.released_tables
+ .lock()
+ .await
+ .iter()
+ .map(|table| *table.table_address())
+ .collect()
+ }
+
+ /// Returns the addresses stored across all active tables
+ pub async fn active_table_pubkeys(&self) -> Vec<Pubkey> {
+ let mut pubkeys = Vec::new();
+ for table in self.active_tables.read().await.iter() {
+ if let Some(pks) = table.pubkeys() {
+ pubkeys.extend(pks.keys());
+ }
+ }
+ pubkeys
+ }
+
+ // -----------------
+ // Reserve
+ // -----------------
+ pub async fn reserve_pubkeys(
+ &self,
+ authority: &Keypair,
+ pubkeys: &HashSet<Pubkey>,
+ ) -> TableManiaResult<()> {
+ let mut remaining = HashSet::new();
+ // 1. Add reservations for pubkeys that are already in one of the tables
+ for pubkey in pubkeys {
+ if !self.reserve_pubkey(pubkey).await {
+ remaining.insert(*pubkey);
+ }
+ }
+
+ // 2. Add new reservations for pubkeys that are not in any table
+ self.reserve_new_pubkeys(authority, &remaining).await
+ }
+
+ /// Tries to find a table that holds this pubkey already and reserves it.
+ /// - *pubkey* to reserve
+ /// - *returns* `true` if the pubkey could be reserved
+ async fn reserve_pubkey(&self, pubkey: &Pubkey) -> bool {
+ for table in self.active_tables.read().await.iter() {
+ if table.reserve_pubkey(pubkey) {
+ trace!(
+ "Added reservation for pubkey {} to table {}",
+ pubkey,
+ table.table_address()
+ );
+ return true;
+ }
+ }
+ trace!("No table found for which we can reserve pubkey {}", pubkey);
+ false
+ }
+
+ /// Reserves pubkeys that haven't been found in any of the active tables.
+ /// Thus this is considered the first reservation for these pubkeys and thus includes
+ /// initializing/extending actual lookup tables on chain.
+ async fn reserve_new_pubkeys(
+ &self,
+ authority: &Keypair,
+ pubkeys: &HashSet<Pubkey>,
+ ) -> TableManiaResult<()> {
+ self.check_authority(authority)?;
+
+ let mut remaining = pubkeys.iter().cloned().collect::<Vec<_>>();
+ let mut tables_used = HashSet::new();
+
+ // Keep trying to store pubkeys until we're done
+ while !remaining.is_empty() {
+ // First try to use existing tables
+ let mut stored_in_existing = false;
+ {
+ // Taking a write lock here to prevent multiple tasks from
+ // updating tables at the same time
+ let active_tables_write_lock = self.active_tables.write().await;
+
+ // Try to use the last table if it's not full
+ if let Some(table) = active_tables_write_lock.last() {
+ if !table.is_full() {
+ self.extend_table(
+ table,
+ authority,
+ &mut remaining,
+ &mut tables_used,
+ )
+ .await;
+ stored_in_existing = true;
+ }
+ }
+ }
+
+ // If we couldn't use existing tables, we need to create a new one
+ if !stored_in_existing && !remaining.is_empty() {
+ // We write lock the active tables to ensure that while we create a new
+ // table the requests looking for an existing table to extend are blocked
+ let mut active_tables_write_lock =
+ self.active_tables.write().await;
+
+ // Double-check if a new table was created while we were waiting for the lock
+ if let Some(table) = active_tables_write_lock.last() {
+ if !table.is_full() {
+ // Another task created a table we can use, so drop the write lock
+ // and try again with the read lock
+ drop(active_tables_write_lock);
+ continue;
+ }
+ }
+
+ // Create a new table and add it to active_tables
+ let table = self
+ .create_new_table_and_extend(authority, &mut remaining)
+ .await?;
+
+ tables_used.insert(*table.table_address());
+ active_tables_write_lock.push(table);
+ }
+
+ // If we've stored all pubkeys, we're done
+ if remaining.is_empty() {
+ break;
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Extends the table to store as many of the provided pubkeys as possible.
+ /// The stored pubkeys are removed from the `remaining` vector.
+ /// If successful the table address is added to the `tables_used` set.
+ /// Returns `true` if the table is full after adding the pubkeys
+ async fn extend_table(
+ &self,
+ table: &LookupTableRc,
+ authority: &Keypair,
+ remaining: &mut Vec<Pubkey>,
+ tables_used: &mut HashSet<Pubkey>,
+ ) {
+ let remaining_len = remaining.len();
+ let storing_len =
+ remaining_len.min(MAX_ENTRIES_AS_PART_OF_EXTEND as usize);
+ trace!(
+ "Adding {}/{} pubkeys to existing table {}",
+ storing_len,
+ remaining_len,
+ table.table_address()
+ );
+ let table_addresses_count = table.pubkeys().unwrap().len();
+
+ let storing = remaining[..storing_len].to_vec();
+ match table
+ .extend_respecting_capacity(&self.rpc_client, authority, &storing)
+ .await
+ {
+ Ok(stored) => {
+ trace!("Stored {}", stored.len());
+ tables_used.insert(*table.table_address());
+ remaining.retain(|pk| !stored.contains(pk));
+ }
+ // TODO: this could cause us to loop forever as remaining
+ // is never updated, possibly we need to return an error
+ // here instead
+ Err(err) => error!(
greptile
logic: Error handling needs to be improved here. The TODO comment correctly identifies that this could cause an infinite loop since remaining is not updated on error. This should return an error instead of just logging it.
diff block
-#!/bin/bash
-
-echo "🔧 Checking dependencies..."
-if ! command -v docker &>/dev/null; then
- echo -e "\t❌ Docker is not installed or not in PATH. Please install Docker first.\n\t\tSee https://docs.docker.com/get-docker/"
- exit 1
-fi
-# Check if docker compose plugin is installed
-if ! docker compose version &>/dev/null; then
- echo -e "\t❌ Docker Compose is not installed or not in PATH (n.b. docker-compose is deprecated)\n\t\tUpdate docker or install docker-compose-plugin\n\t\tOn Linux: sudo apt-get install docker-compose-plugin\n\t\tSee https://docs.docker.com/compose/install/"
- exit 1
-fi
-# Check if docker is started
-if ! docker info &>/dev/null; then
- echo -e "\t❌ Docker is not running.\n\t\tPlease start Docker Desktop, Docker or check documentation at https://docs.docker.com/config/daemon/start/"
- exit 1
-fi
-if ! command -v curl &>/dev/null; then
- echo -e "\t❌ Curl is not installed or not in PATH.\n\t\tOn macOS: brew install curl\n\t\tOn Linux: sudo apt install curl"
- exit 1
-fi
-
-# Check if docker compose version is >= 2
-if [ "$(docker compose version --short | cut -d' ' -f3 | cut -d'.' -f1)" -lt 2 ]; then
- echo -e "\t❌ Docker Compose is outdated. Please update Docker Compose to version 2 or higher.\n\t\tSee https://docs.docker.com/compose/install/linux/"
- exit 1
-fi
-# Check if docker-compose is installed, if so issue a warning if version is < 2
-if command -v docker-compose &>/dev/null; then
- if [ "$(docker-compose version --short | cut -d' ' -f3 | cut -d'.' -f1)" -lt 2 ]; then
- echo -e "\n\t⚠️ 'docker-compose' is installed but outdated. Make sure to use 'docker compose' or to upgrade 'docker-compose' to version 2.\n\t\tSee https://docs.docker.com/compose/install/standalone/\n"
- fi
-fi
-
-# Catch errors
-set -e
-function on_exit {
- # $? is the exit status of the last command executed
- local exit_status=$?
- if [ $exit_status -ne 0 ]; then
- echo "❌ Something went wrong, exiting: $exit_status"
- fi
-}
-trap on_exit EXIT
-
-# Use environment variables VERSION and BRANCH, with defaults if not set
-version=${VERSION:-$(curl -s "https://hub.docker.com/v2/repositories/twentycrm/twenty/tags" | grep -o '"name":"[^"]*"' | grep -v 'latest' | cut -d'"' -f4 | sort -V | tail -n1)}
-branch=${BRANCH:-$(curl -s https://api.github.com/repos/twentyhq/twenty/tags | grep '"name":' | head -n 1 | cut -d '"' -f 4)}
-
-echo "🚀 Using docker version $version and Github branch $branch"
-
-dir_name="twenty"
-function ask_directory {
- read -p "📁 Enter the directory name to setup the project (default: $dir_name): " answer
- if [ -n "$answer" ]; then
- dir_name=$answer
- fi
-}
-
-ask_directory
-
-while [ -d "$dir_name" ]; do
- read -p "🚫 Directory '$dir_name' already exists. Do you want to overwrite it? (y/N) " answer
- if [ "$answer" = "y" ]; then
- break
- else
- ask_directory
- fi
-done
-
-# Create a directory named twenty
-echo "📁 Creating directory '$dir_name'"
-mkdir -p "$dir_name" && cd "$dir_name" || { echo "❌ Failed to create/access directory '$dir_name'"; exit 1; }
-
-# Copy twenty/packages/twenty-docker/docker-compose.yml in it
-echo -e "\t• Copying docker-compose.yml"
-curl -sLo docker-compose.yml https://raw.githubusercontent.com/twentyhq/twenty/$branch/packages/twenty-docker/docker-compose.yml
-
-# Copy twenty/packages/twenty-docker/.env.example to .env
-echo -e "\t• Setting up .env file"
-curl -sLo .env https://raw.githubusercontent.com/twentyhq/twenty/$branch/packages/twenty-docker/.env.example
-
-# Replace TAG=latest by TAG=<latest_release or version input>
-if [[ $(uname) == "Darwin" ]]; then
- # Running on macOS
- sed -i '' "s/TAG=latest/TAG=$version/g" .env
-else
- # Assuming Linux
- sed -i'' "s/TAG=latest/TAG=$version/g" .env
-fi
-
-# Generate random strings for secrets
-echo "# === Randomly generated secret ===" >> .env
-echo "APP_SECRET=$(openssl rand -base64 32)" >> .env
-
-echo "" >> .env
-echo "PG_DATABASE_PASSWORD=$(openssl rand -hex 16)" >> .env
-
-echo -e "\t• .env configuration completed"
-
-port=3000
-# Check if command nc is available
-if command -v nc &> /dev/null; then
- # Check if port 3000 is already in use, propose to change it
- while nc -zv localhost $port &>/dev/null; do
- read -p "🚫 Port $port is already in use. Do you want to use another port? (Y/n) " answer
- if [ "$answer" = "n" ]; then
- continue
- fi
- read -p "Enter a new port number: " new_port
- if [[ $(uname) == "Darwin" ]]; then
- sed -i '' "s/$port:$port/$new_port:$port/g" docker-compose.yml
- sed -E -i '' "s|^SERVER_URL=http://localhost:[0-9]+|SERVER_URL=http://localhost:$new_port|g" .env
- else
- sed -i'' "s/$port:$port/$new_port:$port/g" docker-compose.yml
- sed -E -i'' "s|^SERVER_URL=http://localhost:[0-9]+|SERVER_URL=http://localhost:$new_port|g" .env
- fi
- port=$new_port
- done
-fi
-
-# Ask user if they want to start the project
-read -p "🚀 Do you want to start the project now? (Y/n) " answer
-if [ "$answer" = "n" ]; then
- echo "✅ Project setup completed. Run 'docker compose up -d' to start."
- exit 0
-else
- echo "🐳 Starting Docker containers..."
- docker compose up -d
- # Check if port is listening
- echo "Waiting for server to be healthy, it might take a few minutes while we initialize the database..."
- # Tail logs of the server until it's ready
- docker compose logs -f server &
- pid=$!
- while [ ! $(docker inspect --format='{{.State.Health.Status}}' twenty-server-1) = "healthy" ]; do
- sleep 1
- done
- kill $pid
- echo ""
- echo "✅ Server is up and running"
-fi
-
-function ask_open_browser {
- read -p "🌐 Do you want to open the project in your browser? (Y/n) " answer
- if [ "$answer" = "n" ]; then
- echo "✅ Setup completed. Access your project at http://localhost:$port"
- exit 0
- fi
-}
-
-# Ask user if they want to open the project
-# Running on macOS
-if [[ $(uname) == "Darwin" ]]; then
- ask_open_browser
-
- open "http://localhost:$port"
-# Assuming Linux
-else
- # xdg-open is not installed, we could be running in a non gui environment
- if command -v xdg-open >/dev/null 2>&1; then
- ask_open_browser
-
- xdg-open "http://localhost:$port"
- else
- echo "✅ Setup completed. Your project is available at http://localhost:$port"
- fi
-fi
+#!/bin/bash
+
+echo "🔧 Checking dependencies..."
+if ! command -v docker &>/dev/null; then
+ echo -e "\t❌ Docker is not installed or not in PATH. Please install Docker first.\n\t\tSee https://docs.docker.com/get-docker/"
+ exit 1
+fi
+# Check if docker compose plugin is installed
+if ! docker compose version &>/dev/null; then
+ echo -e "\t❌ Docker Compose is not installed or not in PATH (n.b. docker-compose is deprecated)\n\t\tUpdate docker or install docker-compose-plugin\n\t\tOn Linux: sudo apt-get install docker-compose-plugin\n\t\tSee https://docs.docker.com/compose/install/"
+ exit 1
+fi
+# Check if docker is started
+if ! docker info &>/dev/null; then
+ echo -e "\t❌ Docker is not running.\n\t\tPlease start Docker Desktop, Docker or check documentation at https://docs.docker.com/config/daemon/start/"
+ exit 1
+fi
+if ! command -v curl &>/dev/null; then
+ echo -e "\t❌ Curl is not installed or not in PATH.\n\t\tOn macOS: brew install curl\n\t\tOn Linux: sudo apt install curl"
+ exit 1
+fi
+
+# Check if docker compose version is >= 2
+if [ "$(docker compose version --short | cut -d' ' -f3 | cut -d'.' -f1)" -lt 2 ]; then
+ echo -e "\t❌ Docker Compose is outdated. Please update Docker Compose to version 2 or higher.\n\t\tSee https://docs.docker.com/compose/install/linux/"
+ exit 1
+fi
+# Check if docker-compose is installed, if so issue a warning if version is < 2
+if command -v docker-compose &>/dev/null; then
+ if [ "$(docker-compose version --short | cut -d' ' -f3 | cut -d'.' -f1)" -lt 2 ]; then
+ echo -e "\n\t⚠️ 'docker-compose' is installed but outdated. Make sure to use 'docker compose' or to upgrade 'docker-compose' to version 2.\n\t\tSee https://docs.docker.com/compose/install/standalone/\n"
+ fi
+fi
+
+# Catch errors
+set -e
+function on_exit {
+ # $? is the exit status of the last command executed
+ local exit_status=$?
+ if [ $exit_status -ne 0 ]; then
+ echo "❌ Something went wrong, exiting: $exit_status"
+ fi
+}
+trap on_exit EXIT
+
+# Use environment variables VERSION and BRANCH, with defaults if not set
+version=${VERSION:-$(curl -s "https://hub.docker.com/v2/repositories/twentycrm/twenty/tags" | grep -o '"name":"[^"]*"' | grep -v 'latest' | cut -d'"' -f4 | sort -V | tail -n1)}
+branch=${BRANCH:-$(curl -s https://api.github.com/repos/twentyhq/twenty/tags | grep '"name":' | head -n 1 | cut -d '"' -f 4)}
+
+echo "🚀 Using docker version $version and Github branch $branch"
+
+dir_name="twenty"
+function ask_directory {
+ read -p "📁 Enter the directory name to setup the project (default: $dir_name): " answer
+ if [ -n "$answer" ]; then
+ dir_name=$answer
+ fi
+}
+
+ask_directory
+
+while [ -d "$dir_name" ]; do
+ read -p "🚫 Directory '$dir_name' already exists. Do you want to overwrite it? (y/N) " answer
+ if [ "$answer" = "y" ]; then
+ break
+ else
+ ask_directory
+ fi
+done
+
+# Create a directory named twenty
+echo "📁 Creating directory '$dir_name'"
+mkdir -p "$dir_name" && cd "$dir_name" || { echo "❌ Failed to create/access directory '$dir_name'"; exit 1; }
+
+# Copy twenty/packages/twenty-docker/docker-compose.yml in it
+echo -e "\t• Copying docker-compose.yml"
+curl -sLo docker-compose.yml https://raw.githubusercontent.com/twentyhq/twenty/$branch/packages/twenty-docker/docker-compose.yml
+
+# Copy twenty/packages/twenty-docker/.env.example to .env
+echo -e "\t• Setting up .env file"
+curl -sLo .env https://raw.githubusercontent.com/twentyhq/twenty/$branch/packages/twenty-docker/.env.example
+
+# Replace TAG=latest by TAG=<latest_release or version input>
+if [[ $(uname) == "Darwin" ]]; then
+ # Running on macOS
+ sed -i '' "s/TAG=latest/TAG=$version/g" .env
+else
+ # Assuming Linux
+ sed -i'' "s/TAG=latest/TAG=$version/g" .env
+fi
+
+# Generate random strings for secrets
+echo "# === Randomly generated secret ===" >> .env
+echo "APP_SECRET=$(openssl rand -base64 32)" >> .env
+
+echo "" >> .env
+echo "PG_DATABASE_PASSWORD=$(openssl rand -hex 16)" >> .env
+
+echo -e "\t• .env configuration completed"
+
+port=3000
+# Check if command nc is available
+if command -v nc &> /dev/null; then
+ # Check if port 3000 is already in use, propose to change it
+ while nc -zv localhost $port &>/dev/null; do
+ read -p "🚫 Port $port is already in use. Do you want to use another port? (Y/n) " answer
+ if [ "$answer" = "n" ]; then
+ continue
+ fi
greptile
logic: Continuing the loop after the user declines the port change could lead to an infinite prompt loop while the port is still in use
suggested fix
if [ "$answer" = "n" ]; then
    break
fi
diff block
+import { memo, useMemo, useRef } from 'react'
+import { Handle, NodeProps, Position, useReactFlow } from 'reactflow'
+import { Trash2 } from 'lucide-react'
+import { StartIcon } from '@/components/icons'
+import { Card } from '@/components/ui/card'
+import { Button } from '@/components/ui/button'
+import { cn } from '@/lib/utils'
+import { useWorkflowStore } from '@/stores/workflows/workflow/store'
+import { ParallelBadges } from './components/parallel-badges'
+import React from 'react'
+
+// Add these styles to your existing global CSS file or create a separate CSS module
+const ParallelNodeStyles: React.FC = () => {
+ return (
+ <style jsx global>{`
+ @keyframes parallel-node-pulse {
+ 0% { box-shadow: 0 0 0 0 rgba(254, 225, 43, 0.3); }
+ 70% { box-shadow: 0 0 0 6px rgba(254, 225, 43, 0); }
+ 100% { box-shadow: 0 0 0 0 rgba(254, 225, 43, 0); }
+ }
+
+ .parallel-node-drag-over {
+ animation: parallel-node-pulse 1.2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
+ border-style: solid !important;
+ background-color: rgba(254, 225, 43, 0.08) !important;
+ box-shadow: 0 0 0 8px rgba(254, 225, 43, 0.1);
+ }
+
+ /* Make resizer handles more visible */
+ .react-flow__resize-control {
+ z-index: 10;
+ pointer-events: all !important;
+ }
+
+ /* Ensure parent borders are visible when hovering over resize controls */
+ .react-flow__node-group:hover,
+ .hover-highlight {
+ border-color: #1e293b !important;
+ }
+
+ /* Ensure hover effects work well */
+ .group-node-container:hover .react-flow__resize-control.bottom-right {
+ opacity: 1 !important;
+ visibility: visible !important;
+ }
+
+ /* React Flow position transitions within parallel blocks */
+ .react-flow__node[data-parent-node-id] {
+ transition: transform 0.05s ease;
+ pointer-events: all;
+ }
+
+ /* Prevent jumpy drag behavior */
+ .parallel-drop-container .react-flow__node {
+ transform-origin: center;
+ position: absolute;
+ }
+
+ /* Remove default border from React Flow group nodes */
+ .react-flow__node-group {
+ border: none;
+ background-color: transparent;
+ outline: none;
+ box-shadow: none;
+ }
+
+ /* Ensure child nodes stay within parent bounds */
+ .react-flow__node[data-parent-node-id] .react-flow__handle {
+ z-index: 30;
+ }
+
+ /* Enhanced drag detection */
+ .react-flow__node-group.dragging-over {
+ background-color: rgba(254,225,43,0.05);
+ transition: all 0.2s ease-in-out;
+ }
+ `}</style>
+ )
+}
+
+export const ParallelNodeComponent = memo(({ data, selected, id }: NodeProps) => {
+ const { getNodes } = useReactFlow();
+ const blockRef = useRef<HTMLDivElement>(null);
+
+ // Determine nesting level by counting parents
+ const nestingLevel = useMemo(() => {
+ let level = 0;
+ let currentParentId = data?.parentId;
+
+ while (currentParentId) {
+ level++;
+ const parentNode = getNodes().find(n => n.id === currentParentId);
+ if (!parentNode) break;
+ currentParentId = parentNode.data?.parentId;
+ }
greptile
logic: potential infinite loop if there's a circular parent reference in the node structure
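A minimal cycle guard, sketched with the same getNodes() / data.parentId shape the diff uses: a Set of visited ids stops the walk if a parent ever repeats.
let level = 0;
const visited = new Set<string>();
let currentParentId = data?.parentId;

// Stop as soon as a parent id repeats, so a circular reference cannot loop forever
while (currentParentId && !visited.has(currentParentId)) {
  visited.add(currentParentId);
  level++;
  const parentNode = getNodes().find((n) => n.id === currentParentId);
  if (!parentNode) break;
  currentParentId = parentNode.data?.parentId;
}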
diff block
+from copy import deepcopy
+from typing import Any
+from posthog.schema_migrations import LATEST_VERSIONS, MIGRATIONS
+
+
+def upgrade(query: dict) -> dict:
+ return upgrade_node(query)
+
+
+def upgrade_node(node: Any) -> Any:
+ if isinstance(node, list):
+ return [upgrade_node(item) for item in node]
+
+ if isinstance(node, tuple):
+ return tuple(upgrade_node(item) for item in node)
+
+ if isinstance(node, dict):
+ if "kind" in node and node["kind"] in LATEST_VERSIONS:
+ while (v := (node.get("v") or 1)) < LATEST_VERSIONS[node["kind"]]:
+ node = MIGRATIONS[node["kind"]][v](deepcopy(node))
greptile
logic: Potential infinite loop if MIGRATIONS[node["kind"]][v] is missing for any version between v and LATEST_VERSIONS[node["kind"]]
suggested fix
while (v := (node.get("v") or 1)) < LATEST_VERSIONS[node["kind"]]:
+     if v not in MIGRATIONS[node["kind"]]:
+         raise ValueError(f"Missing migration handler for {node['kind']} version {v}")
    node = MIGRATIONS[node["kind"]][v](deepcopy(node))
diff block
});
}
}, [pastHours]);
+
+ useEffect(() => {
+ if (calendarDate?.from) {
+ const from = new Date(calendarDate.from);
+ from.setHours(parseInt(startTime.hour));
+ from.setMinutes(parseInt(startTime.minute));
+
+ const newRange: ReactDateRange = { from };
+
+ if (calendarDate.to) {
+ const to = new Date(calendarDate.to);
+ to.setHours(parseInt(endTime.hour));
+ to.setMinutes(parseInt(endTime.minute));
+ newRange.to = to;
+ }
+
+ setCalendarDate(newRange);
+ }
+ }, [startTime, endTime]);
greptile
logic: This useEffect creates an infinite loop - changing startTime/endTime triggers calendarDate update, which then triggers startTime/endTime updates again
suggested fix
useEffect(() => {
+     if (!calendarDate?.from) return;
+     // Only update if the hours/minutes are different to avoid loops
+     const currentHours = calendarDate.from.getHours();
+     const currentMinutes = calendarDate.from.getMinutes();
+     const newHours = parseInt(startTime.hour);
+     const newMinutes = parseInt(startTime.minute);
+     if (currentHours === newHours && currentMinutes === newMinutes) return;
    const from = new Date(calendarDate.from);
+     from.setHours(newHours);
+     from.setMinutes(newMinutes);
    const newRange: ReactDateRange = { from };
    if (calendarDate.to) {
        const to = new Date(calendarDate.to);
        to.setHours(parseInt(endTime.hour));
        to.setMinutes(parseInt(endTime.minute));
        newRange.to = to;
    }
    setCalendarDate(newRange);
}, [startTime, endTime]);
diff block
+import json
+import logging
+import os
+import tempfile
+import time
+import uuid
+from datetime import datetime
+from typing import Any, Dict
+
+import tenacity
+from openai import (
+ APIConnectionError,
+ APIStatusError,
+ APITimeoutError,
+ OpenAI,
+ RateLimitError,
+)
+from rich.console import Console
+
+# from logger import logger as global_logger_instance # DEPRECATED
+from batchgrader.utils import get_encoder
+
+logger = logging.getLogger(__name__)
+
+
+def get_config_value(config: Dict[str, Any],
+ key: str,
+ default: Any = None) -> Any:
+ """
+ Helper function to safely extract values from config dict
+ """
+ return config.get(key, default)
+
+
+class SimulatedChunkFailureError(Exception):
+ pass
+
+
+class LLMClient:
+
+ def __init__(self, model=None, api_key=None, endpoint=None, config=None):
+ self.logger = logging.getLogger(__name__)
+ self.config = config or {}
+
+ # Extract values from injected config or use provided parameters
+ self.api_key = api_key or get_config_value(self.config,
+ "openai_api_key")
+ self.model = model or get_config_value(self.config,
+ "openai_model_name")
+ self.endpoint = endpoint or get_config_value(self.config,
+ "batch_api_endpoint")
+ self.max_tokens = int(
+ get_config_value(self.config, "max_tokens_per_response", 1000))
+ self.poll_interval = int(
+ get_config_value(self.config, "poll_interval_seconds", 60))
+
+ # Create client instance with the API key
+ self.client = OpenAI(api_key=self.api_key)
+ self.logger.info(
+ f"LLMClient initialized. Model: {self.model}, Endpoint: {self.endpoint}"
+ )
+
+ # Retry settings initialization
+ self.retry_settings = get_config_value(self.config, "retry_settings",
+ {})
+ self.max_retries = self.retry_settings.get("max_retries", 3)
+ self.initial_backoff = self.retry_settings.get(
+ "initial_backoff_seconds", 1)
+ self.max_backoff = self.retry_settings.get("max_backoff_seconds", 60)
+
+ self.retry_decorator = tenacity.retry(
+ stop=tenacity.stop_after_attempt(self.max_retries),
+ wait=tenacity.wait_exponential(multiplier=1,
+ min=self.initial_backoff,
+ max=self.max_backoff),
+ retry=(tenacity.retry_if_exception_type(APIConnectionError)
+ | tenacity.retry_if_exception_type(RateLimitError)
+ | tenacity.retry_if_exception_type(APITimeoutError)
+ | tenacity.retry_if_exception(lambda e: isinstance(
+ e, APIStatusError) and e.status_code >= 500)),
+ before_sleep=tenacity.before_sleep_log(self.logger,
+ logging.WARNING),
+ reraise=True,
+ )
+ self.logger.info(
+ f"LLMClient retry decorator configured: "
+ f"max_retries={self.max_retries}, initial_backoff={self.initial_backoff}s, max_backoff={self.max_backoff}s"
+ )
+
+ # Initialize encoder using centralized utility
+ self.encoder = get_encoder(self.model)
+
+ def _prepare_batch_requests(self, df, system_prompt_content,
+ response_field_name):
+ from .exceptions import FileFormatError
+ requests = []
+ if response_field_name not in df.columns:
+ raise FileFormatError(
+ f"Required column '{response_field_name}' not found in input DataFrame. "
+ f"Columns present: {list(df.columns)}")
+ df["custom_id"] = [str(uuid.uuid4()) for _ in range(len(df))]
+ for _, row in df.iterrows():
+ custom_id = row["custom_id"]
+ text_to_evaluate = str(row[response_field_name])
+ messages = [
+ {
+ "role": "system",
+ "content": system_prompt_content
+ },
+ {
+ "role":
+ "user",
+ "content":
+ f"Please evaluate the following text: {text_to_evaluate}",
+ },
+ ]
+ body = {
+ "model": self.model,
+ "messages": messages,
+ "max_tokens": self.max_tokens,
+ }
+ requests.append({
+ "custom_id": custom_id,
+ "method": "POST",
+ "url": self.endpoint,
+ "body": body,
+ })
+ return requests, df
+
+ def _upload_batch_input_file(self, requests_data,
+ base_filename_for_tagging):
+ temp_file_path = None
+ try:
+ with tempfile.NamedTemporaryFile(
+ mode="w+",
+ delete=False,
+ suffix=".jsonl",
+ prefix=f"{base_filename_for_tagging}_",
+ ) as tmp_f:
+ temp_file_path = tmp_f.name
+ for request_item in requests_data:
+ tmp_f.write(json.dumps(request_item) + "\n")
+ with open(temp_file_path, "rb") as f_rb:
+
+ @self.retry_decorator
+ def _do_upload():
+ self.logger.info(
+ f"Uploading batch input file: {temp_file_path} for {base_filename_for_tagging}"
+ )
+ return self.client.files.create(file=f_rb, purpose="batch")
+
+ batch_input_file = _do_upload()
+ self.logger.info(
+ f"Successfully uploaded batch input file {batch_input_file.id} for {base_filename_for_tagging}"
+ )
+ return batch_input_file.id
+ finally:
+ if temp_file_path and os.path.exists(temp_file_path):
+ os.remove(temp_file_path)
+
+ def _manage_batch_job(self, input_file_id, source_filename):
+ console = Console()
+ console.print(
+ f"Creating batch job for {source_filename} with file ID: {input_file_id}"
+ )
+
+ @self.retry_decorator
+ def _create_batch_job():
+ self.logger.info(
+ f"Attempting to create batch job for input_file_id: {input_file_id}"
+ )
+ return self.client.batches.create(
+ input_file_id=input_file_id,
+ endpoint=self.endpoint,
+ completion_window="24h",
+ metadata={"source_file": source_filename},
+ )
+
+ batch_job = _create_batch_job()
+ self.logger.info(
+ f"Batch job {batch_job.id} created successfully for {source_filename}."
+ )
+
+ last_status = None
+ terminal_statuses = ["completed", "failed", "expired", "cancelled"]
+
+ @self.retry_decorator # Decorate the retrieve call
+ def _retrieve_batch_status(job_id):
+ self.logger.debug(
+ f"Attempting to retrieve status for batch job: {job_id}")
+ return self.client.batches.retrieve(job_id)
+
+ while True:
+ retrieved_batch = _retrieve_batch_status(
+ batch_job.id) # Call moved inside the main polling loop
+ status = retrieved_batch.status
+ status_line = f"[{datetime.now():%y/%m/%d %H:%M:%S}] INFO Batch job {batch_job.id} status: {status}"
+ if status not in terminal_statuses:
+ if status == last_status:
+ console.print(status_line,
+ end="\r",
+ highlight=False,
+ soft_wrap=True)
+ else:
+ console.print(status_line)
+ else:
+ console.print(status_line)
+ return retrieved_batch
+ last_status = status
+ time.sleep(self.poll_interval)
greptile
logic: The polling loop should have a maximum duration or timeout to prevent infinite loops if the API never returns a terminal status
suggested fix
+ start_time = time.time()
+ max_duration = 24 * 60 * 60  # 24 hours max duration
while True:
+     if time.time() - start_time > max_duration:
+         raise TimeoutError(f"Batch job {batch_job.id} polling exceeded maximum duration of 24 hours")
    retrieved_batch = _retrieve_batch_status(
        batch_job.id)  # Call moved inside the main polling loop
    status = retrieved_batch.status
    status_line = f"[{datetime.now():%y/%m/%d %H:%M:%S}] INFO Batch job {batch_job.id} status: {status}"
    if status not in terminal_statuses:
        if status == last_status:
            console.print(status_line,
                          end="\r",
                          highlight=False,
                          soft_wrap=True)
        else:
            console.print(status_line)
    else:
        console.print(status_line)
        return retrieved_batch
    last_status = status
    time.sleep(self.poll_interval)
diff block
// TODO: immediately die? or just keep going?
})
+ this.batchConsumer.consumer.on('disconnected', async (err) => {
+ // since we can't be guaranteed that the consumer will be stopped before some other code calls disconnect
+ // we need to listen to disconnect and make sure we're stopped
+ logger.info('🔁', 'blob_ingester_consumer_v2 batch consumer disconnected, cleaning up', { err })
+ await this.stop()
+ })
greptile
logic: Recursive stop call in disconnected handler could cause infinite loop if stop() triggers disconnect
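One possible guard, sketched as members on the same consumer class; the stopPromise field and stopOnce() name are illustrative, not part of the existing code.
// Remember that a stop is already in flight so re-entry becomes a no-op
private stopPromise: Promise<void> | null = null

private stopOnce(): Promise<void> {
    // Repeated calls (e.g. from the 'disconnected' handler) reuse the in-flight stop
    if (!this.stopPromise) {
        this.stopPromise = this.stop()
    }
    return this.stopPromise
}

this.batchConsumer.consumer.on('disconnected', async (err) => {
    logger.info('🔁', 'blob_ingester_consumer_v2 batch consumer disconnected, cleaning up', { err })
    await this.stopOnce()
})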
diff block
import { AppBasePath } from '@/types/AppBasePath';
import { isNonEmptyString } from '@sniptt/guards';
import { isDefined } from 'twenty-shared';
-import { useCallback } from 'react';
export const useIsMatchingLocation = () => {
const location = useLocation();
- // Infinite loop issue caused by `checkUserExistsQuery` in `useSignInUp`.
- // Without executing this query, there is no infinite loop.
- // I also noticed that in `isMatchingLocation` inside `continueWithEmail`, no loop occurs.
- // Both functions are called within the `useEffect` of `SignInUpWorkspaceScopeFormEffect`.
- // This led me to conclude that the issue comes from `useIsMatchingLocation`.
- // Using `useCallback` prevent the loop.
- const isMatchingLocation = useCallback(
- (path: string, basePath?: AppBasePath) => {
- const addTrailingSlash = (path: string) =>
- path.endsWith('/') ? path : path + '/';
+ const addTrailingSlash = (path: string) =>
+ path.endsWith('/') ? path : path + '/';
- const getConstructedPath = (path: string, basePath?: AppBasePath) => {
- if (!isNonEmptyString(basePath)) return path;
+ const getConstructedPath = (path: string, basePath?: AppBasePath) => {
+ if (!isNonEmptyString(basePath)) return path;
- return addTrailingSlash(basePath) + path;
- };
+ return addTrailingSlash(basePath) + path;
+ };
- const match = matchPath(
- getConstructedPath(path, basePath),
- location.pathname,
- );
- return isDefined(match);
- },
- [location.pathname],
- );
+ const isMatchingLocation = (path: string, basePath?: AppBasePath) => {
+ const match = matchPath(
+ getConstructedPath(path, basePath),
+ location.pathname,
+ );
+ return isDefined(match);
+ };
greptile
logic: Removing useCallback here may cause performance issues and infinite loops in useEffect dependencies. The previous implementation specifically noted this prevented loops with checkUserExistsQuery.
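If the loop does resurface, the memoized shape that this diff removes can be restored largely as-is (it also needs the useCallback import from 'react' again):
const isMatchingLocation = useCallback(
  (path: string, basePath?: AppBasePath) => {
    const match = matchPath(
      getConstructedPath(path, basePath),
      location.pathname,
    );
    return isDefined(match);
  },
  [location.pathname],
);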
diff block
+use anyhow::Result;
+use bitvec::prelude::*;
+use clap::Parser;
+use futures::stream::{FuturesUnordered, StreamExt};
+use indicatif::{ProgressBar, ProgressStyle};
+use reqwest::{Client, StatusCode, Url};
+use serde_json::json;
+use sha2::{Digest, Sha256};
+use std::fs::{File, OpenOptions};
+use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
+use std::path::Path;
+use std::sync::Arc;
+use tokio::sync::Mutex;
+
+const DEFAULT_MAX_RETRIES: usize = 3;
+const RETRY_BACKOFF_MS: u64 = 500;
+const DEFAULT_OUTPUT_FILENAME: &str = "output.bin";
+const DEFAULT_CONNECTIONS: usize = 16;
+const DEFAULT_CHUNK_SIZE_MB: usize = 5;
+const DEFAULT_BUFFER_SIZE_MB: usize = 8;
+const DEFAULT_VERBOSE: bool = false;
+const DEFAULT_SILENT: bool = false;
+const DEFAULT_LOG: bool = false;
+const DEFAULT_FORCE_NEW: bool = false;
+const DEFAULT_RESUME_ONLY: bool = false;
+const HEADER_SIZE: usize = 4096;
+const MAGIC_NUMBER: &[u8; 5] = b"HYDRA";
+const FORMAT_VERSION: u8 = 1;
+
+#[derive(Parser)]
+#[command(name = "hydra-httpdl")]
+#[command(author = "los-broxas")]
+#[command(version = "0.2.0")]
+#[command(about = "high speed and low resource usage http downloader with resume capability", long_about = None)]
+struct CliArgs {
+ /// file url to download
+ #[arg(required = true)]
+ url: String,
+
+ /// output file path (or directory to save with original filename)
+ #[arg(default_value = DEFAULT_OUTPUT_FILENAME)]
+ output: String,
+
+ /// number of concurrent connections for parallel download
+ #[arg(short = 'c', long, default_value_t = DEFAULT_CONNECTIONS)]
+ connections: usize,
+
+ /// chunk size in MB for each connection
+ #[arg(short = 'k', long, default_value_t = DEFAULT_CHUNK_SIZE_MB)]
+ chunk_size: usize,
+
+ /// buffer size in MB for file writing
+ #[arg(short, long, default_value_t = DEFAULT_BUFFER_SIZE_MB)]
+ buffer_size: usize,
+
+ /// show detailed progress information
+ #[arg(short = 'v', long, default_value_t = DEFAULT_VERBOSE)]
+ verbose: bool,
+
+ /// suppress progress bar
+ #[arg(short = 's', long, default_value_t = DEFAULT_SILENT)]
+ silent: bool,
+
+ /// log download statistics in JSON format every second
+ #[arg(short = 'l', long, default_value_t = DEFAULT_LOG)]
+ log: bool,
+
+ /// force new download, ignore existing partial files
+ #[arg(short = 'f', long, default_value_t = DEFAULT_FORCE_NEW)]
+ force_new: bool,
+
+ /// only resume existing download, exit if no partial file exists
+ #[arg(short = 'r', long, default_value_t = DEFAULT_RESUME_ONLY)]
+ resume_only: bool,
+}
+
+struct DownloadConfig {
+ url: String,
+ output_path: String,
+ num_connections: usize,
+ chunk_size: usize,
+ buffer_size: usize,
+ verbose: bool,
+ silent: bool,
+ log: bool,
+ force_new: bool,
+ resume_only: bool,
+}
+
+impl DownloadConfig {
+ fn should_log(&self) -> bool {
+ self.verbose && !self.silent
+ }
+
+ fn should_log_stats(&self) -> bool {
+ self.log
+ }
+}
+
+struct DownloadStats {
+ progress_percent: f64,
+ bytes_downloaded: u64,
+ total_size: u64,
+ speed_bytes_per_sec: f64,
+ eta_seconds: u64,
+ elapsed_seconds: u64,
+}
+
+struct HydraHeader {
+ magic: [u8; 5], // "HYDRA" identifier
+ version: u8, // header version
+ file_size: u64, // file size
+ etag: [u8; 32], // etag hash
+ url_hash: [u8; 32], // url hash
+ chunk_size: u32, // chunk size
+ chunk_count: u32, // chunk count
+ chunks_bitmap: BitVec<u8>, // chunks bitmap
+}
+
+impl HydraHeader {
+ fn new(file_size: u64, etag: &str, url: &str, chunk_size: u32) -> Self {
+ let chunk_count = ((file_size as f64) / (chunk_size as f64)).ceil() as u32;
+ let chunks_bitmap = bitvec![u8, Lsb0; 0; chunk_count as usize];
+
+ let mut etag_hash = [0u8; 32];
+ let etag_digest = Sha256::digest(etag.as_bytes());
+ etag_hash.copy_from_slice(&etag_digest[..]);
+
+ let mut url_hash = [0u8; 32];
+ let url_digest = Sha256::digest(url.as_bytes());
+ url_hash.copy_from_slice(&url_digest[..]);
+
+ Self {
+ magic: *MAGIC_NUMBER,
+ version: FORMAT_VERSION,
+ file_size,
+ etag: etag_hash,
+ url_hash,
+ chunk_size,
+ chunk_count,
+ chunks_bitmap,
+ }
+ }
+
+ fn write_to_file<W: Write + Seek>(&self, writer: &mut W) -> Result<()> {
+ writer.write_all(&self.magic)?;
+ writer.write_all(&[self.version])?;
+ writer.write_all(&self.file_size.to_le_bytes())?;
+ writer.write_all(&self.etag)?;
+ writer.write_all(&self.url_hash)?;
+ writer.write_all(&self.chunk_size.to_le_bytes())?;
+ writer.write_all(&self.chunk_count.to_le_bytes())?;
+
+ let bitmap_bytes = self.chunks_bitmap.as_raw_slice();
+ writer.write_all(bitmap_bytes)?;
+
+ let header_size = 5 + 1 + 8 + 32 + 32 + 4 + 4 + bitmap_bytes.len();
+ let padding_size = HEADER_SIZE - header_size;
+ let padding = vec![0u8; padding_size];
+ writer.write_all(&padding)?;
+
+ Ok(())
+ }
+
+ fn read_from_file<R: Read + Seek>(reader: &mut R) -> Result<Self> {
+ let mut magic = [0u8; 5];
+ reader.read_exact(&mut magic)?;
+
+ if magic != *MAGIC_NUMBER {
+ anyhow::bail!("Not a valid Hydra download file");
+ }
+
+ let mut version = [0u8; 1];
+ reader.read_exact(&mut version)?;
+
+ if version[0] != FORMAT_VERSION {
+ anyhow::bail!("Incompatible format version");
+ }
+
+ let mut file_size_bytes = [0u8; 8];
+ reader.read_exact(&mut file_size_bytes)?;
+ let file_size = u64::from_le_bytes(file_size_bytes);
+
+ let mut etag = [0u8; 32];
+ reader.read_exact(&mut etag)?;
+
+ let mut url_hash = [0u8; 32];
+ reader.read_exact(&mut url_hash)?;
+
+ let mut chunk_size_bytes = [0u8; 4];
+ reader.read_exact(&mut chunk_size_bytes)?;
+ let chunk_size = u32::from_le_bytes(chunk_size_bytes);
+
+ let mut chunk_count_bytes = [0u8; 4];
+ reader.read_exact(&mut chunk_count_bytes)?;
+ let chunk_count = u32::from_le_bytes(chunk_count_bytes);
+
+ let bitmap_bytes_len = (chunk_count as usize + 7) / 8;
+ let mut bitmap_bytes = vec![0u8; bitmap_bytes_len];
+ reader.read_exact(&mut bitmap_bytes)?;
+
+ let chunks_bitmap = BitVec::<u8, Lsb0>::from_vec(bitmap_bytes);
+
+ reader.seek(SeekFrom::Start(HEADER_SIZE as u64))?;
+
+ Ok(Self {
+ magic,
+ version: version[0],
+ file_size,
+ etag,
+ url_hash,
+ chunk_size,
+ chunk_count,
+ chunks_bitmap,
+ })
+ }
+
+ fn set_chunk_complete(&mut self, chunk_index: usize) -> Result<()> {
+ if chunk_index >= self.chunk_count as usize {
+ anyhow::bail!("Chunk index out of bounds");
+ }
+
+ self.chunks_bitmap.set(chunk_index, true);
+ Ok(())
+ }
+
+ fn is_chunk_complete(&self, chunk_index: usize) -> bool {
+ if chunk_index >= self.chunk_count as usize {
+ return false;
+ }
+
+ self.chunks_bitmap[chunk_index]
+ }
+
+ fn get_incomplete_chunks(&self) -> Vec<(u64, u64)> {
+ let mut chunks = Vec::new();
+ let chunk_size = self.chunk_size as u64;
+
+ for i in 0..self.chunk_count as usize {
+ if !self.is_chunk_complete(i) {
+ let start = i as u64 * chunk_size;
+ let end = std::cmp::min((i as u64 + 1) * chunk_size - 1, self.file_size - 1);
+ chunks.push((start, end));
+ }
+ }
+
+ chunks
+ }
+
+ fn is_download_complete(&self) -> bool {
+ self.chunks_bitmap.count_ones() == self.chunk_count as usize
+ }
+}
+
+struct ProgressTracker {
+ bar: Option<ProgressBar>,
+}
+
+impl ProgressTracker {
+ fn new(file_size: u64, silent: bool, enable_stats: bool) -> Result<Self> {
+ let bar = if !silent || enable_stats {
+ let pb = ProgressBar::new(file_size);
+ pb.set_style(
+ ProgressStyle::default_bar()
+ .template("[{elapsed_precise}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")?
+ );
+ if silent {
+ pb.set_draw_target(indicatif::ProgressDrawTarget::hidden());
+ }
+ Some(pb)
+ } else {
+ None
+ };
+
+ Ok(Self { bar })
+ }
+
+ fn increment(&self, amount: u64) {
+ if let Some(pb) = &self.bar {
+ pb.inc(amount);
+ }
+ }
+
+ fn finish(&self) {
+ if let Some(pb) = &self.bar {
+ pb.finish_with_message("Download complete");
+ }
+ }
+
+ fn get_stats(&self) -> Option<DownloadStats> {
+ if let Some(pb) = &self.bar {
+ let position = pb.position();
+ let total = pb.length().unwrap_or(1);
+
+ Some(DownloadStats {
+ progress_percent: position as f64 / total as f64,
+ bytes_downloaded: position,
+ total_size: total,
+ speed_bytes_per_sec: pb.per_sec(),
+ eta_seconds: pb.eta().as_secs(),
+ elapsed_seconds: pb.elapsed().as_secs(),
+ })
+ } else {
+ None
+ }
+ }
+}
+
+struct Downloader {
+ client: Client,
+ config: DownloadConfig,
+}
+
+impl Downloader {
+ async fn download(&self) -> Result<()> {
+ let (file_size, filename, etag) = self.get_file_info().await?;
+ let output_path = self.determine_output_path(filename);
+
+ if self.config.should_log() {
+ println!("Detected filename: {}", output_path);
+ }
+
+ let resume_manager = ResumeManager::try_from_file(
+ &output_path,
+ file_size,
+ &etag,
+ &self.config.url,
+ self.config.chunk_size as u32,
+ self.config.force_new,
+ self.config.resume_only,
+ )?;
+
+ let file = self.prepare_output_file(&output_path, file_size)?;
+ let progress = ProgressTracker::new(file_size, self.config.silent, self.config.log)?;
+
+ let chunks = if resume_manager.is_download_complete() {
+ if self.config.should_log() {
+ println!("File is already fully downloaded, finalizing...");
+ }
+ resume_manager.finalize_download()?;
+ return Ok(());
+ } else {
+ let completed_chunks = resume_manager.header.chunks_bitmap.count_ones() as u32;
+ let total_chunks = resume_manager.header.chunk_count;
+
+ if completed_chunks > 0 {
+ if self.config.should_log() {
+ let percent_done = (completed_chunks as f64 / total_chunks as f64) * 100.0;
+ println!("Resuming download: {:.1}% already downloaded", percent_done);
+ }
+
+ if let Some(pb) = &progress.bar {
+ let downloaded = file_size * completed_chunks as u64 / total_chunks as u64;
+ pb.inc(downloaded);
+ }
+ }
+
+ resume_manager.get_incomplete_chunks()
+ };
+
+ if self.config.should_log() {
+ println!(
+ "Downloading {} chunks of total {}",
+ chunks.len(),
+ resume_manager.header.chunk_count
+ );
+ }
+
+ self.process_chunks_with_resume(
+ chunks,
+ file,
+ file_size,
+ progress,
+ output_path.clone(),
+ resume_manager,
+ )
+ .await?;
+
+ Ok(())
+ }
+
+ fn determine_output_path(&self, filename: Option<String>) -> String {
+ if Path::new(&self.config.output_path)
+ .file_name()
+ .unwrap_or_default()
+ == DEFAULT_OUTPUT_FILENAME
+ && filename.is_some()
+ {
+ filename.unwrap()
+ } else {
+ self.config.output_path.clone()
+ }
+ }
+
+ fn prepare_output_file(&self, path: &str, size: u64) -> Result<Arc<Mutex<BufWriter<File>>>> {
+ let file = if Path::new(path).exists() {
+ OpenOptions::new().read(true).write(true).open(path)?
+ } else {
+ let file = File::create(path)?;
+ file.set_len(HEADER_SIZE as u64 + size)?;
+ file
+ };
+
+ Ok(Arc::new(Mutex::new(BufWriter::with_capacity(
+ self.config.buffer_size,
+ file,
+ ))))
+ }
+
+ async fn process_chunks_with_resume(
+ &self,
+ chunks: Vec<(u64, u64)>,
+ file: Arc<Mutex<BufWriter<File>>>,
+ _file_size: u64,
+ progress: ProgressTracker,
+ real_filename: String,
+ resume_manager: ResumeManager,
+ ) -> Result<()> {
+ let mut tasks = FuturesUnordered::new();
+
+ let log_progress = if self.config.should_log_stats() {
+ let progress_clone = progress.bar.clone();
+ let filename = real_filename.clone();
+
+ let log_task = tokio::spawn(async move {
+ let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(1));
+ let tracker = ProgressTracker {
+ bar: progress_clone,
+ };
+
+ loop {
+ interval.tick().await;
+ if let Some(stats) = tracker.get_stats() {
+ let json_output = json!({
+ "progress": stats.progress_percent,
+ "speed_bps": stats.speed_bytes_per_sec,
+ "downloaded_bytes": stats.bytes_downloaded,
+ "total_bytes": stats.total_size,
+ "eta_seconds": stats.eta_seconds,
+ "elapsed_seconds": stats.elapsed_seconds,
+ "filename": filename
+ });
+ println!("{}", json_output);
+ }
+ }
+ });
greptile
style: This logging task creates an infinite loop that will only be terminated by the abort() call. Consider adding a channel-based shutdown mechanism for cleaner termination.
diff block
+import datetime
+import itertools
+from collections.abc import Iterator
+from dataclasses import dataclass
+from functools import reduce
+from typing import ClassVar
+
+import dagster
+import pydantic
+from clickhouse_driver import Client
+from dateutil.relativedelta import relativedelta
+
+from posthog import settings
+from posthog.clickhouse.cluster import ClickhouseCluster, MutationRunner
+
+
+class PartitionRange(dagster.Config):
+ lower: str
+ upper: str
+
+ FORMAT: ClassVar[str] = "%Y%m"
+
+ def iter_dates(self) -> Iterator[str]:
+ date_lower = self.parse_date(self.lower)
+ date_upper = self.parse_date(self.upper)
+ seq = itertools.count()
+ while (date := date_lower + relativedelta(months=next(seq))) <= date_upper:
+ yield date
greptile
logic: Potential infinite loop if date_upper is less than date_lower after adding months. Consider adding a safety limit.
diff block
+use borsh::{to_vec, BorshDeserialize};
+use dlp::pda::commit_state_pda_from_delegated_account;
+use log::*;
+use magicblock_rpc_client::{
+ MagicBlockRpcClientError, MagicBlockRpcClientResult,
+ MagicBlockSendTransactionConfig,
+};
+use solana_pubkey::Pubkey;
+use std::{
+ collections::{HashMap, HashSet},
+ sync::Arc,
+ time::Duration,
+};
+use tokio::task::JoinSet;
+
+use magicblock_committor_program::{
+ instruction::{
+ create_init_ix, create_realloc_buffer_ixs,
+ create_realloc_buffer_ixs_to_add_remaining, create_write_ix,
+ CreateInitIxArgs, CreateReallocBufferIxArgs, CreateWriteIxArgs,
+ },
+ instruction_chunks::chunk_realloc_ixs,
+ Changeset, ChangesetChunk, Chunks, CommitableAccount,
+};
+
+use crate::{
+ commit::common::get_accounts_to_undelegate,
+ commit_stage::CommitSignatures,
+ error::{CommitAccountError, CommitAccountResult},
+ finalize::{
+ chunked_ixs_to_finalize_commitables,
+ ChunkedIxsToFinalizeCommitablesResult,
+ },
+ persist::CommitStrategy,
+ types::InstructionsKind,
+ undelegate::{
+ chunked_ixs_to_undelegate_commitables, undelegate_commitables_ixs,
+ },
+ CommitInfo, CommitStage,
+};
+
+use super::{
+ common::send_and_confirm,
+ process_buffers::{
+ chunked_ixs_to_process_commitables_and_close_pdas,
+ ChunkedIxsToProcessCommitablesAndClosePdasResult,
+ },
+ CommittorProcessor,
+};
+use solana_sdk::{hash::Hash, instruction::Instruction, signer::Signer};
+
+struct NextReallocs {
+ missing_size: u64,
+ start_idx: usize,
+}
+
+impl CommittorProcessor {
+ /// Commits the changeset by initializing the accounts, writing the chunks,
+ /// and closing the pdas.
+ /// NOTE: we return no error since the validator would not know how to mitigate
+ /// the problem.
+ pub async fn commit_changeset_using_buffers(
+ processor: Arc<Self>,
+ changeset: Changeset,
+ finalize: bool,
+ ephemeral_blockhash: Hash,
+ use_lookup: bool,
+ ) -> Vec<CommitStage> {
+ macro_rules! handle_unchunked {
+ ($unchunked:ident, $commit_stages:ident, $commit_stage:expr) => {
+ for (bundle_id, commit_infos) in $unchunked.into_iter() {
+ // The max amount of accounts we can commit and process as part of a single
+ // transaction is [crate::max_per_transaction::MAX_COMMIT_STATE_AND_CLOSE_PER_TRANSACTION].
+ warn!(
+ "Commit infos for bundle id {} are too many to be processed in a single transaction",
+ bundle_id
+ );
+ $commit_stages.extend(
+ commit_infos
+ .into_iter()
+ .map($commit_stage),
+ );
+ }
+ }
+ }
+
+ let owners = changeset.owners();
+ let accounts_len = changeset.account_keys().len();
+ let commit_strategy = if use_lookup {
+ CommitStrategy::FromBufferWithLookupTable
+ } else {
+ CommitStrategy::FromBuffer
+ };
+ let accounts_to_undelegate =
+ get_accounts_to_undelegate(&changeset, finalize);
+ let results = processor
+ .prepare_changeset_buffers(
+ changeset,
+ ephemeral_blockhash,
+ commit_strategy,
+ finalize,
+ )
+ .await;
+
+ let mut commit_stages = vec![];
+
+ // 1. Init Buffer and Chunks Account
+ let (mut succeeded_inits, failed_inits): (Vec<_>, Vec<_>) = {
+ let (succeeded, failed): (Vec<_>, Vec<_>) =
+ results.into_iter().partition(Result::is_ok);
+ (
+ succeeded
+ .into_iter()
+ .map(Result::unwrap)
+ .collect::<Vec<_>>(),
+ failed
+ .into_iter()
+ .map(Result::unwrap_err)
+ .collect::<Vec<_>>(),
+ )
+ };
+
+ // If we couldn't init the buffers for a specific commit then we're done with it.
+ for commit_err in failed_inits.into_iter() {
+ let commit_stage = CommitStage::from(commit_err);
+ let bundle_id = commit_stage.commit_metadata().bundle_id();
+ commit_stages.push(commit_stage);
+
+ // We also need to skip all committables that are in the same bundle as
+ // a commit we're giving up on.
+ let (fail_in_order_to_respect_bundle, keep): (Vec<_>, Vec<_>) =
+ succeeded_inits.drain(..).partition(|commit_info| {
+ #[allow(clippy::let_and_return)]
+ let same_bundle = commit_info.bundle_id() == bundle_id;
+ same_bundle
+ });
+ commit_stages.extend(
+ fail_in_order_to_respect_bundle.into_iter().map(|x| {
+ CommitStage::BufferAndChunkFullyInitialized((
+ x,
+ commit_strategy,
+ ))
+ }),
+ );
+ succeeded_inits.extend(keep);
+ }
+
+ // 2. Create chunks of instructions that process the commits and respect desired bundles
+ let ChunkedIxsToProcessCommitablesAndClosePdasResult {
+ chunked_ixs,
+ chunked_close_ixs,
+ unchunked,
+ } = chunked_ixs_to_process_commitables_and_close_pdas(
+ processor.authority.pubkey(),
+ succeeded_inits.clone(),
+ use_lookup,
+ );
+ handle_unchunked!(
+ unchunked,
+ commit_stages,
+ CommitStage::PartOfTooLargeBundleToProcess
+ );
+
+ // 3. Process all chunks via transactions, one per chunk of instructions
+ trace!(
+ "ChunkedIxs: {}",
+ chunked_ixs
+ .iter()
+ .map(|xs| xs
+ .iter()
+ .map(|x| x.to_string())
+ .collect::<Vec<_>>()
+ .join("\n"))
+ .collect::<Vec<_>>()
+ .join("]\n\n[\n")
+ );
+ debug_assert_eq!(
+ chunked_ixs.iter().map(|x| x.len()).sum::<usize>() + commit_stages.len(),
+ accounts_len,
+ "Sum of instructions and early bail out stages should have one instruction per commmitted account",
+ );
+
+ let table_mania = use_lookup.then(|| processor.table_mania.clone());
+ let (succeeded_process, failed_process) = processor
+ .process_ixs_chunks(
+ chunked_ixs,
+ chunked_close_ixs,
+ table_mania.as_ref(),
+ &owners,
+ )
+ .await;
+
+ commit_stages.extend(failed_process.into_iter().flat_map(
+ |(sig, xs)| {
+ let sigs = sig.map(|x| CommitSignatures {
+ process_signature: x,
+ finalize_signature: None,
+ undelegate_signature: None,
+ });
+ xs.into_iter()
+ .map(|x| {
+ CommitStage::FailedProcess((
+ x,
+ commit_strategy,
+ sigs.as_ref().cloned(),
+ ))
+ })
+ .collect::<Vec<_>>()
+ },
+ ));
+
+ let mut processed_commit_infos = vec![];
+ let mut processed_signatures = HashMap::new();
+ for (sig, commit_infos) in succeeded_process {
+ if log_enabled!(Level::Trace) {
+ let kinds = commit_infos
+ .iter()
+ .map(|(_, kind)| *kind)
+ .collect::<HashSet<InstructionsKind>>();
+ let handled = kinds
+ .iter()
+ .map(|x| format!("{:?}", x))
+ .collect::<Vec<_>>()
+ .join(" | ");
+ trace!(
+ "Successfully handled ({}) for {} commit info(s) via transaction '{}'",
+ handled,
+ commit_infos.len(),
+ sig
+ );
+ }
+ for (commit_info, _) in commit_infos
+ .into_iter()
+ .filter(|(_, kind)| kind.is_processing())
+ {
+ let bundle_id = commit_info.bundle_id();
+ debug_assert!(
+ processed_signatures
+ .get(&bundle_id)
+ .map(|x| x == &sig)
+ .unwrap_or(true),
+ "BUG: Same processed bundle ids should have the same signature"
+ );
+ processed_signatures.insert(bundle_id, sig);
+ processed_commit_infos.push(commit_info);
+ }
+ }
+
+ // 4. Optionally finalize + undelegate all processed commits also respecting bundles
+ if finalize && !processed_commit_infos.is_empty() {
+ // 4.1. Create chunks of finalize instructions that fit in a single transaction
+ let ChunkedIxsToFinalizeCommitablesResult {
+ chunked_ixs,
+ unchunked,
+ } = chunked_ixs_to_finalize_commitables(
+ processor.authority.pubkey(),
+ processed_commit_infos,
+ use_lookup,
+ );
+ handle_unchunked!(
+ unchunked,
+ commit_stages,
+ CommitStage::PartOfTooLargeBundleToFinalize
+ );
+
+ // 4.2. Run each finalize chunk in a single transaction
+ let (succeeded_finalize, failed_finalize): (Vec<_>, Vec<_>) =
+ processor
+ .process_ixs_chunks(
+ chunked_ixs,
+ None,
+ table_mania.as_ref(),
+ &owners,
+ )
+ .await;
+ commit_stages.extend(failed_finalize.into_iter().flat_map(
+ |(sig, infos)| {
+ infos
+ .into_iter()
+ .map(|x| {
+ let bundle_id = x.bundle_id();
+ CommitStage::FailedFinalize((
+ x,
+ commit_strategy,
+ CommitSignatures {
+ // SAFETY: signatures for all bundles of succeeded process transactions
+ // have been added above
+ process_signature: *processed_signatures
+ .get(&bundle_id)
+ .unwrap(),
+ finalize_signature: sig,
+ undelegate_signature: None,
+ },
+ ))
+ })
+ .collect::<Vec<_>>()
+ },
+ ));
+
+ let mut finalized_commit_infos = vec![];
+ let mut finalized_signatures = HashMap::new();
+ for (sig, commit_infos) in succeeded_finalize {
+ trace!(
+ "Successfully finalized {} commit infos via transaction '{}'",
+ commit_infos.len(),
+ sig
+ );
+ for (commit_info, kind) in commit_infos.iter() {
+ debug_assert_eq!(
+ kind,
+ &InstructionsKind::Finalize,
+ "Expecting separate finalize instructions onky"
+ );
+ let bundle_id = commit_info.bundle_id();
+ debug_assert!(
+ finalized_signatures
+ .get(&bundle_id)
+ .map(|x| x == &sig)
+ .unwrap_or(true),
+ "BUG: Same finalized bundle ids should have the same signature"
+ );
+
+ finalized_signatures.insert(bundle_id, sig);
+ }
+ let commit_infos = commit_infos
+ .into_iter()
+ .map(|(info, _)| info)
+ .collect::<Vec<_>>();
+ finalized_commit_infos.extend(commit_infos);
+ }
+ // 4.2. Consider undelegation by first dividing finalized accounts into two sets,
+ let (finalize_and_undelegate, finalize_only) =
+ finalized_commit_infos
+ .into_iter()
+ .partition::<Vec<_>, _>(|x| x.undelegate());
+ // 4.3.a accounts we don't need to undelegate are done
+ commit_stages.extend(finalize_only.into_iter().map(|x| {
+ let bundle_id = x.bundle_id();
+ CommitStage::Succeeded((
+ x,
+ commit_strategy,
+ CommitSignatures {
+ // SAFETY: signatures for all bundles of succeeded process transactions
+ // have been added above
+ process_signature: *processed_signatures
+ .get(&bundle_id)
+ .unwrap(),
+ finalize_signature: finalized_signatures
+ .get(&bundle_id)
+ .cloned(),
+ undelegate_signature: None,
+ },
+ ))
+ }));
+ // 4.3.b the other accounts need to be undelegated first
+ if let Some(accounts) = accounts_to_undelegate {
+ debug_assert_eq!(
+ accounts.len(),
+ finalize_and_undelegate.len(),
+ "BUG: same amount of accounts to undelegate as to finalize and undelegate"
+ );
+ let undelegate_ixs = match undelegate_commitables_ixs(
+ &processor.magicblock_rpc_client,
+ processor.authority.pubkey(),
+ accounts,
+ )
+ .await
+ {
+ Ok(ixs) => Some(ixs),
+ Err(err) => {
+ error!(
+ "Failed to prepare accounts undelegation '{}': {:?}",
+ err, err
+ );
+ commit_stages.extend(
+ finalize_and_undelegate.iter().map(|x| {
+ let bundle_id = x.bundle_id();
+ CommitStage::FailedUndelegate((
+ x.clone(),
+ CommitStrategy::args(use_lookup),
+ CommitSignatures {
+ // SAFETY: signatures for all bundles of succeeded process transactions
+ // have been added above
+ process_signature:
+ *processed_signatures
+ .get(&bundle_id)
+ .unwrap(),
+ finalize_signature:
+ finalized_signatures
+ .get(&bundle_id)
+ .cloned(),
+ undelegate_signature: err.signature(),
+ },
+ ))
+ }),
+ );
+ None
+ }
+ };
+ if let Some(undelegate_ixs) = undelegate_ixs {
+ let chunked_ixs = chunked_ixs_to_undelegate_commitables(
+ undelegate_ixs,
+ finalize_and_undelegate,
+ use_lookup,
+ );
+ let (succeeded_undelegate, failed_undelegate): (
+ Vec<_>,
+ Vec<_>,
+ ) = processor
+ .process_ixs_chunks(
+ chunked_ixs,
+ None,
+ table_mania.as_ref(),
+ &owners,
+ )
+ .await;
+
+ commit_stages.extend(
+ failed_undelegate.into_iter().flat_map(
+ |(sig, infos)| {
+ infos
+ .into_iter()
+ .map(|x| {
+ let bundle_id = x.bundle_id();
+ CommitStage::FailedUndelegate((
+ x,
+ commit_strategy,
+ CommitSignatures {
+ // SAFETY: signatures for all bundles of succeeded process transactions
+ // have been added above
+ process_signature:
+ *processed_signatures
+ .get(&bundle_id)
+ .unwrap(),
+ finalize_signature:
+ finalized_signatures
+ .get(&bundle_id)
+ .cloned(),
+ undelegate_signature: sig,
+ },
+ ))
+ })
+ .collect::<Vec<_>>()
+ },
+ ),
+ );
+ commit_stages.extend(
+ succeeded_undelegate.into_iter().flat_map(
+ |(sig, infos)| {
+ infos
+ .into_iter()
+ .map(|(x, _)| {
+ let bundle_id = x.bundle_id();
+ CommitStage::Succeeded((
+ x,
+ commit_strategy,
+ CommitSignatures {
+ // SAFETY: signatures for all bundles of succeeded process transactions
+ // have been added above
+ process_signature:
+ *processed_signatures
+ .get(&bundle_id)
+ .unwrap(),
+ finalize_signature:
+ finalized_signatures
+ .get(&bundle_id)
+ .cloned(),
+ undelegate_signature: Some(sig),
+ },
+ ))
+ })
+ .collect::<Vec<_>>()
+ },
+ ),
+ );
+ }
+ } else {
+ debug_assert!(
+ finalize_and_undelegate.is_empty(),
+ "BUG: We should either have accounts to undelegate or an empty finalize_and_undelegate"
+ );
+ }
+ } else {
+ commit_stages.extend(processed_commit_infos.into_iter().map(|x| {
+ let bundle_id = x.bundle_id();
+ CommitStage::Succeeded((
+ x,
+ commit_strategy,
+ CommitSignatures {
+ // SAFETY: signatures for all bundles of succeeded process transactions
+ // have been added above
+ process_signature: *processed_signatures
+ .get(&bundle_id)
+ .unwrap(),
+ finalize_signature: None,
+ undelegate_signature: None,
+ },
+ ))
+ }));
+ }
+
+ debug_assert_eq!(
+ accounts_len,
+ CommitStage::commit_infos(&commit_stages).len(),
+ "Should have one commit stage per commmitted account ({}) {:#?}",
+ accounts_len,
+ commit_stages
+ );
+
+ commit_stages
+ }
+
+ async fn prepare_changeset_buffers(
+ &self,
+ changeset: Changeset,
+ ephemeral_blockhash: Hash,
+ commit_strategy: CommitStrategy,
+ finalize: bool,
+ ) -> Vec<CommitAccountResult<CommitInfo>> {
+ let commitables =
+ changeset.into_committables(crate::consts::MAX_WRITE_CHUNK_SIZE);
+ let mut join_set: JoinSet<CommitAccountResult<CommitInfo>> =
+ JoinSet::new();
+ for commitable in commitables {
+ let me = Arc::new(self.clone());
+ join_set.spawn(Self::commit_account(
+ me,
+ commitable,
+ ephemeral_blockhash,
+ commit_strategy,
+ finalize,
+ ));
+ }
+ join_set.join_all().await
+ }
+
+ async fn commit_account(
+ me: Arc<Self>,
+ mut commitable: CommitableAccount,
+ ephemeral_blockhash: Hash,
+ commit_strategy: CommitStrategy,
+ finalize: bool,
+ ) -> CommitAccountResult<CommitInfo> {
+ let commit_info = if commitable.has_data() {
+ let chunks =
+ Chunks::new(commitable.chunk_count(), commitable.chunk_size());
+ let chunks_account_size = to_vec(&chunks).unwrap().len() as u64;
+
+ // Initialize the Changeset and Chunks accounts on chain
+ let buffer_account_size = commitable.size() as u64;
+
+ let (init_ix, chunks_pda, buffer_pda) =
+ create_init_ix(CreateInitIxArgs {
+ authority: me.authority.pubkey(),
+ pubkey: commitable.pubkey,
+ chunks_account_size,
+ buffer_account_size,
+ blockhash: ephemeral_blockhash,
+ chunk_count: commitable.chunk_count(),
+ chunk_size: commitable.chunk_size(),
+ });
+ let realloc_ixs =
+ create_realloc_buffer_ixs(CreateReallocBufferIxArgs {
+ authority: me.authority.pubkey(),
+ pubkey: commitable.pubkey,
+ buffer_account_size,
+ blockhash: ephemeral_blockhash,
+ });
+
+ let commit_info = CommitInfo::BufferedDataAccount {
+ pubkey: commitable.pubkey,
+ commit_state: commit_state_pda_from_delegated_account(
+ &commitable.pubkey,
+ ),
+ delegated_account_owner: commitable.delegated_account_owner,
+ slot: commitable.slot,
+ ephemeral_blockhash,
+ undelegate: commitable.undelegate,
+ chunks_pda,
+ buffer_pda,
+ lamports: commitable.lamports,
+ bundle_id: commitable.bundle_id,
+ finalize,
+ };
+
+ // Even though this transaction also inits the chunks account we check
+ // that it succeeded by querying the buffer account since this is the
+ // only of the two that we may have to realloc.
+ let commit_info = Arc::new(
+ me.init_accounts(
+ init_ix,
+ realloc_ixs,
+ commitable.pubkey,
+ &buffer_pda,
+ buffer_account_size,
+ ephemeral_blockhash,
+ commit_info,
+ commit_strategy,
+ )
+ .await?,
+ );
+
+ let mut last_write_chunks_err = None;
+ if let Err(err) = me
+ .write_chunks(
+ commitable.pubkey,
+ commitable.iter_all(),
+ ephemeral_blockhash,
+ )
+ .await
+ {
+ last_write_chunks_err = Some(err);
+ };
+
+ let mut remaining_tries = 10;
+ const MAX_GET_ACCOUNT_RETRIES: usize = 5;
+ loop {
+ let mut acc = None;
+ let mut last_get_account_err = None;
+ for _ in 0..MAX_GET_ACCOUNT_RETRIES {
+ match me
+ .magicblock_rpc_client
+ .get_account(&chunks_pda)
+ .await
+ {
+ Ok(Some(x)) => {
+ acc.replace(x);
+ break;
+ }
+ Ok(None) => {
+ me.wait_for_account("chunks account", None).await
+ }
+ Err(err) => {
+ me.wait_for_account("chunks account", Some(&err))
+ .await;
+ last_get_account_err.replace(err);
+ }
+ }
+ }
+ let Some(acc) = acc else {
+ return Err(CommitAccountError::GetChunksAccount(
+ last_get_account_err,
+ commit_info.clone(),
+ commit_strategy,
+ ));
+ };
+ let chunks =
+ Chunks::try_from_slice(&acc.data).map_err(|err| {
+ CommitAccountError::DeserializeChunksAccount(
+ err,
+ commit_info.clone(),
+ commit_strategy,
+ )
+ })?;
+
+ if chunks.is_complete() {
+ break;
+ }
+
+ remaining_tries -= 1;
+ if remaining_tries == 0 {
+ return Err(
+ CommitAccountError::WriteChunksRanOutOfRetries(
+ last_write_chunks_err,
+ commit_info.clone(),
+ commit_strategy,
+ ),
+ );
+ }
+ commitable.set_chunks(chunks);
+ if let Err(err) = me
+ .write_chunks(
+ commitable.pubkey,
+ commitable.iter_missing(),
+ ephemeral_blockhash,
+ )
+ .await
+ {
+ last_write_chunks_err = Some(err);
+ }
+ }
+ commit_info
+ } else {
+ Arc::new(CommitInfo::EmptyAccount {
+ pubkey: commitable.pubkey,
+ delegated_account_owner: commitable.delegated_account_owner,
+ slot: commitable.slot,
+ ephemeral_blockhash,
+ undelegate: commitable.undelegate,
+ lamports: commitable.lamports,
+ bundle_id: commitable.bundle_id,
+ finalize,
+ })
+ };
+
+ let commit_info = Arc::<CommitInfo>::unwrap_or_clone(commit_info);
+
+ Ok(commit_info)
+ }
+
+ /// Sends init/realloc transactions until the account has the desired size
+ /// - `init_ix` - the instruction to initialize the buffer and chunk account
+ /// - `realloc_ixs` - the instructions to realloc the buffer account until it reaches the
+ /// size needed to store the account's data
+ /// - `pubkey` - the pubkey of the account whose data we are storing
+ /// - `buffer_pda` - the address of the account where we buffer the data to be committed
+ /// - `buffer_account_size` - the size of the buffer account
+ /// - `ephemeral_blockhash` - the blockhash in the ephemeral at which we are committing
+ /// - `commit_info` - the commit info to be returned or included in errors
+ /// - `commit_strategy` - the commit strategy that is used
+ #[allow(clippy::too_many_arguments)] // private method
+ async fn init_accounts(
+ &self,
+ init_ix: Instruction,
+ realloc_ixs: Vec<Instruction>,
+ pubkey: Pubkey,
+ buffer_pda: &Pubkey,
+ buffer_account_size: u64,
+ ephemeral_blockhash: Hash,
+ commit_info: CommitInfo,
+ commit_strategy: CommitStrategy,
+ ) -> CommitAccountResult<CommitInfo> {
+ // We cannot allocate more than MAX_INITIAL_BUFFER_SIZE in a single
+ // instruction. Therefore we append a realloc instruction if the buffer
+ // is very large.
+ // init_ixs is the init ix with as many realloc ixs as fit into one tx
+ // extra_realloc_ixs are the remaining realloc ixs that need to be sent
+ // in separate transactions
+ let (init_ix_chunk, extra_realloc_ix_chunks) = {
+ let mut chunked_ixs = chunk_realloc_ixs(realloc_ixs, Some(init_ix));
+ let init_with_initial_reallocs = chunked_ixs.remove(0);
+ let remaining_reallocs = if chunked_ixs.is_empty() {
+ None
+ } else {
+ Some(chunked_ixs)
+ };
+ (init_with_initial_reallocs, remaining_reallocs)
+ };
+
+ debug!(
+ "Init+Realloc chunk ixs {}, Extra Realloc Chunks {}",
+ init_ix_chunk.len(),
+ extra_realloc_ix_chunks.as_ref().map_or(0, |x| x.len())
+ );
+
+ // First ensure that the tx including the init ix lands
+ let mut init_sig = None;
+ let mut last_err = None;
+ const MAX_RETRIES: usize = 2;
+ 'land_init_transaction: for _ in 0..MAX_RETRIES {
+ // Only retry the init transaction if it failed to send and confirm
+ if init_sig.is_none() {
+ let init_budget_ixs = self
+ .compute_budget_config
+ .buffer_init
+ .instructions(init_ix_chunk.len() - 1);
+ match send_and_confirm(
+ self.magicblock_rpc_client.clone(),
+ self.authority.insecure_clone(),
+ [init_budget_ixs, init_ix_chunk.clone()].concat(),
+ "init buffer and chunk account".to_string(),
+ None,
+ MagicBlockSendTransactionConfig::ensure_committed(),
+ None,
+ )
+ .await
+ {
+ Err(err) => {
+ last_err = Some(err);
+ continue;
+ }
+ Ok(sig) => {
+ init_sig = Some(sig);
+ }
+ }
+ }
+
+ // At this point the transaction was confirmed and we should be able
+ // to get the initialized pda and chunk account
+ const MAX_GET_ACCOUNT_RETRIES: usize = 5;
+ for _ in 0..MAX_GET_ACCOUNT_RETRIES {
+ match self.magicblock_rpc_client.get_account(buffer_pda).await {
+ Ok(Some(_)) => {
+ // The account was initialized
+ break 'land_init_transaction;
+ }
+ Ok(None) => {
+ self.wait_for_account("buffer account", None).await
+ }
+ Err(err) => {
+ self.wait_for_account("buffer account", Some(&err))
+ .await
+ }
+ }
+ }
+ } // 'land_init_transaction
+
+ if init_sig.is_none() {
+ let err = last_err
+ .as_ref()
+ .map(|x| x.to_string())
+ .unwrap_or("Unknown Error".to_string());
+ return Err(CommitAccountError::InitBufferAndChunkAccounts(
+ err,
+ Box::new(commit_info),
+ commit_strategy,
+ ));
+ }
+
+ // After that we can ensure all extra reallocs in parallel
+ if let Some(realloc_ixs) = extra_realloc_ix_chunks {
+ let mut next_reallocs = self
+ .run_reallocs(
+ buffer_pda,
+ realloc_ixs,
+ buffer_account_size,
+ buffer_account_size,
+ 0,
+ )
+ .await;
+
+ if next_reallocs.is_some() {
+ let args = CreateReallocBufferIxArgs {
+ authority: self.authority.pubkey(),
+ pubkey,
+ buffer_account_size,
+ blockhash: ephemeral_blockhash,
+ };
+
+ while let Some(NextReallocs {
+ missing_size,
+ start_idx,
+ }) = next_reallocs
+ {
+ let realloc_ixs = {
+ let realloc_ixs =
+ create_realloc_buffer_ixs_to_add_remaining(
+ &args,
+ missing_size,
+ );
+
+ chunk_realloc_ixs(realloc_ixs, None)
+ };
+ next_reallocs = self
+ .run_reallocs(
+ buffer_pda,
+ realloc_ixs,
+ buffer_account_size,
+ missing_size,
+ start_idx,
+ )
+ .await;
+ // TODO(thlorenz): give up at some point
+ }
greptile
logic: Missing retry limit for realloc attempts. Could lead to infinite loop if reallocations consistently fail.
diff block
+export function hook(
+ _this: { [x: string]: any },
+ method: string | number,
+ callback: (arg0: any) => void,
+) {
+ const orig = _this[method];
+
+ return (...args: any) => {
+ callback.apply(_this, args);
+
+ return orig.apply(_this, args);
+ };
+}
+
+export function sleep(ms: number | undefined) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+export function shuffleArray(a) {
+ const arr = a.slice();
+ for (let i = arr.length - 1; i > 0; i--) {
+ const j = Math.floor(Math.random() * (i + 1));
+ const temp = arr[i];
+ arr[i] = arr[j];
+ arr[j] = temp;
+ }
+ return arr;
+}
+
+export function chunkArray(arr: any[], size: number) {
+ const chunks: any[] = [];
+
+ let index = 0;
+ while (index < arr.length) {
+ chunks.push(arr.slice(index, size + index));
+ index += size;
+ }
+
+ return chunks;
+}
greptile
logic: No validation for negative or zero size parameter which could cause infinite loop or empty chunks.
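A possible guard at the top of chunkArray; whether to throw or fall back to a single chunk is a design choice, and the error message here is only illustrative.
export function chunkArray(arr: any[], size: number) {
  // Reject sizes that would never advance the index (size <= 0) or produce odd slices
  if (!Number.isInteger(size) || size <= 0) {
    throw new Error(`chunkArray expects a positive integer size, got ${size}`);
  }

  const chunks: any[] = [];
  let index = 0;
  while (index < arr.length) {
    chunks.push(arr.slice(index, size + index));
    index += size;
  }
  return chunks;
}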
diff block
/// Creates a signal builder.
pub fn signal<T: Signal + Serialize>(&self, body: T) -> builder::signal::SignalBuilder<T> {
+ // TODO: Add check for from_workflow so you cant dispatch a signal
builder::signal::SignalBuilder::new(self.db.clone(), self.ray_id, body)
greptile
logic: This TODO should be implemented before merging. Signals dispatched from within a workflow could cause unexpected behavior or infinite loops. Consider throwing a BuilderError::CannotDispatchFromOpInWorkflow error if self.op_ctx.from_workflow() is true, similar to the check in compat.rs
diff block
+import { actions, connect, kea, key, listeners, path, props, selectors } from 'kea'
+import { router } from 'kea-router'
+import { TaxonomicFilterGroupType } from 'lib/components/TaxonomicFilter/types'
+import { featureFlagLogic } from 'lib/logic/featureFlagLogic'
+import { MathAvailability } from 'scenes/insights/filters/ActionFilter/ActionFilterRow/ActionFilterRow'
+import { insightVizDataLogic } from 'scenes/insights/insightVizDataLogic'
+import { keyForInsightLogicProps } from 'scenes/insights/sharedUtils'
+import { pathsTitle } from 'scenes/trends/persons-modal/persons-modal-utils'
+import { openPersonsModal, OpenPersonsModalProps } from 'scenes/trends/persons-modal/PersonsModal'
+import { urls } from 'scenes/urls'
+
+import { actionsAndEventsToSeries } from '~/queries/nodes/InsightQuery/utils/filtersToQueryNode'
+import { InsightActorsQuery, InsightVizNode, NodeKind, PathsLink, PathsQuery } from '~/queries/schema/schema-general'
+import { isPathsQuery } from '~/queries/utils'
+import { ActionFilter, InsightLogicProps, PathType, PropertyFilterType, PropertyOperator } from '~/types'
+
+import type { pathsDataLogicType } from './pathsDataLogicType'
+import { PathNodeData } from './pathUtils'
+import { Paths, PathsNode } from './types'
+
+export const DEFAULT_STEP_LIMIT = 5
+
+const DEFAULT_PATH_LOGIC_KEY = 'default_path_key'
+
+export const pathsDataLogic = kea<pathsDataLogicType>([
+ path((key) => ['scenes', 'paths', 'pathsDataLogic', key]),
+ props({} as InsightLogicProps),
+ key(keyForInsightLogicProps(DEFAULT_PATH_LOGIC_KEY)),
+
+ connect((props: InsightLogicProps) => ({
+ values: [
+ insightVizDataLogic(props),
+ [
+ 'querySource as vizQuerySource',
+ 'insightQuery',
+ 'insightData',
+ 'insightDataLoading',
+ 'insightDataError',
+ 'pathsFilter',
+ 'funnelPathsFilter',
+ 'dateRange',
+ 'theme',
+ ],
+ featureFlagLogic,
+ ['featureFlags'],
+ ],
+ actions: [insightVizDataLogic(props), ['updateInsightFilter', 'updateQuerySource']],
+ })),
+
+ actions({
+ openPersonsModal: (props: { path_start_key?: string; path_end_key?: string; path_dropoff_key?: string }) =>
+ props,
+ viewPathToFunnel: (pathItemCard: PathNodeData) => ({ pathItemCard }),
+ }),
+
+ selectors({
+ results: [
+ (s) => [s.insightQuery, s.insightData],
+ (insightQuery, insightData): PathsLink[] => {
+ return isPathsQuery(insightQuery) ? insightData?.result ?? [] : []
+ },
+ ],
+ paths: [
+ (s) => [s.results],
+ (results): Paths => {
+ const nodes: Record<string, PathsNode> = {}
+ for (const path of results) {
+ if (!nodes[path.source]) {
+ nodes[path.source] = { name: path.source }
+ }
+ if (!nodes[path.target]) {
+ nodes[path.target] = { name: path.target }
+ }
+ }
+
+ return {
+ nodes: Object.values(nodes),
+ links: results,
+ }
+ },
+ ],
+ taxonomicGroupTypes: [
+ (s) => [s.pathsFilter],
+ (pathsFilter) => {
+ const taxonomicGroupTypes: TaxonomicFilterGroupType[] = []
+ if (pathsFilter?.includeEventTypes) {
+ if (pathsFilter?.includeEventTypes.includes(PathType.PageView)) {
+ taxonomicGroupTypes.push(TaxonomicFilterGroupType.PageviewUrls)
+ }
+ if (pathsFilter?.includeEventTypes.includes(PathType.Screen)) {
+ taxonomicGroupTypes.push(TaxonomicFilterGroupType.Screens)
+ }
+ if (pathsFilter?.includeEventTypes.includes(PathType.CustomEvent)) {
+ taxonomicGroupTypes.push(TaxonomicFilterGroupType.CustomEvents)
+ }
+ }
+ taxonomicGroupTypes.push(TaxonomicFilterGroupType.Wildcards)
+ return taxonomicGroupTypes
+ },
+ ],
+ }),
+
+ listeners(({ values }) => ({
+ openPersonsModal: ({ path_start_key, path_end_key, path_dropoff_key }) => {
+ const query: InsightActorsQuery = {
+ kind: NodeKind.InsightActorsQuery,
+ source: {
+ ...values.vizQuerySource,
+ pathsFilter: {
+ ...(values.vizQuerySource as PathsQuery)?.pathsFilter,
+ pathStartKey: path_start_key,
+ pathEndKey: path_end_key,
+ pathDropoffKey: path_dropoff_key,
+ },
+ } as PathsQuery,
+ }
+ const modalProps: OpenPersonsModalProps = {
+ title: pathsTitle({
+ label: path_dropoff_key || path_start_key || path_end_key || 'Pageview',
+ mode: path_dropoff_key ? 'dropOff' : path_start_key ? 'continue' : 'completion',
+ }),
+ query,
+ additionalSelect: {
+ value_at_data_point: 'event_count',
+ matched_recordings: 'matched_recordings',
+ },
+ orderBy: ['event_count DESC, actor_id DESC'],
+ }
+ openPersonsModal(modalProps)
+ },
+ viewPathToFunnel: ({ pathItemCard }) => {
+ const events: ActionFilter[] = []
+ let currentItemCard = pathItemCard
+ while (currentItemCard) {
+ const name = currentItemCard.name.includes('http')
+ ? '$pageview'
+ : currentItemCard.name.replace(/(^[0-9]+_)/, '')
+ events.push({
+ id: name,
+ name: name,
+ type: 'events',
+ order: currentItemCard.depth,
+ ...(currentItemCard.name.includes('http') && {
+ properties: [
+ {
+ key: '$current_url',
+ operator: PropertyOperator.Exact,
+ type: PropertyFilterType.Event,
+ value: currentItemCard.name.replace(/(^[0-9]+_)/, ''),
+ },
+ ],
+ }),
+ })
+ currentItemCard = currentItemCard.targetLinks[0]?.source
greptile
logic: potential infinite loop if there's a cycle in the path data structure - consider adding a maximum iteration limit
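A hedged sketch of that cycle guard in isolation: the node shape below only mirrors the fields the diff actually reads (name, depth, targetLinks) and is an assumption, not the real PathNodeData type.

type PathNodeSketch = { name: string; depth: number; targetLinks: { source?: PathNodeSketch }[] }

function walkPathOnce(start: PathNodeSketch, visit: (node: PathNodeSketch) => void): void {
    // A visited set guarantees termination even if targetLinks form a cycle.
    const seen = new Set<PathNodeSketch>()
    let current: PathNodeSketch | undefined = start
    while (current && !seen.has(current)) {
        seen.add(current)
        visit(current)
        current = current.targetLinks[0]?.source
    }
}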
diff block
]
)
+ // Helper function to find next/previous non-separator item
+ const findNextFocusableItem = (
+ items: TreeDataItem[],
+ currentIndex: number,
+ direction: 1 | -1
+ ): TreeDataItem | undefined => {
+ let index = currentIndex
+ while (true) {
+ index += direction
+ if (direction > 0 && index >= items.length) {
+ return undefined
+ }
+ if (direction < 0 && index < 0) {
+ return undefined
+ }
+ if (items[index].type !== 'separator') {
+ return items[index]
+ }
+ }
greptile
logic: Potential infinite loop if no non-separator items exist in the direction being searched
suggested fix
let index = currentIndex
+ let loopCount = 0
+ while (loopCount < items.length) {
index += direction
if (direction > 0 && index >= items.length) {
return undefined
}
if (direction < 0 && index < 0) {
return undefined
}
if (items[index].type !== 'separator') {
return items[index]
}
+ loopCount++
}
return undefined
diff block
}
this.instance.persistence?.register({ [SURVEYS]: surveys })
- return callback(surveys)
+ return callback(surveys, {
+ isLoaded: true,
+ })
},
})
} catch (e) {
this._isFetchingSurveys = false
throw e
}
} else {
- return callback(existingSurveys)
+ return callback(existingSurveys, {
+ isLoaded: true,
+ })
+ }
+ }
+
+ /** Helper method to notify all registered callbacks */
+ private _notifySurveyCallbacks(context: { isLoaded: boolean; error?: string }): void {
+ for (const callback of this._surveyCallbacks) {
+ try {
+ if (!context.isLoaded) {
+ callback([], context)
+ } else {
+ this.getSurveys(callback)
+ }
+ } catch (error) {
+ logger.error('Error in survey callback', error)
+ }
}
}
greptile
logic: Potential infinite loop if getSurveys triggers _notifySurveyCallbacks again
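The usual guard here is a re-entrancy flag; below is a minimal, self-contained illustration of that pattern - the class and field names are illustrative, not the library's actual API.

class CallbackNotifier {
    private callbacks: Array<() => void> = []
    private notifying = false

    register(cb: () => void): void {
        this.callbacks.push(cb)
    }

    notify(): void {
        if (this.notifying) {
            // A callback re-triggered notify(); skip instead of recursing indefinitely.
            return
        }
        this.notifying = true
        try {
            for (const cb of this.callbacks) {
                try {
                    cb()
                } catch (error) {
                    console.error('Error in callback', error)
                }
            }
        } finally {
            this.notifying = false
        }
    }
}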
diff block
}
}
- try {
- await attachParamsToInvoice({
- sb,
- attachParams,
- invoiceId: subUpdate.latest_invoice as string,
- });
- console.log(" - Inserted latest invoice ID for subscription update");
- } catch (error) {
- console.log(
- "Error inserting latest invoice ID for subscription update",
- error
- );
- }
+ await attachParamsToInvoice({
+ sb,
+ attachParams,
+ invoiceId: subUpdate.latest_invoice as string,
+ logger,
+ });
// 2. Create new subscriptions
let newSubIds = [];
- newSubIds.push(firstExistingSubId);
+ newSubIds.push(firstSub.id);
const newItemSets = itemSets.slice(1);
let invoiceIds = [];
+
+ // CREATE NEW SUBSCRIPTIONS
for (const itemSet of newItemSets) {
- const newSub = await stripeCli.subscriptions.create({
- customer: attachParams.customer.processor.id,
- items: itemSet.items,
- metadata: itemSet.subMeta,
+ // stripeCli.subscriptions.create({
+ // customer: attachParams.customer.processor.id,
+ // items: itemSet.items,
+ // metadata: itemSet.subMeta,
+ // ...((attachParams.invoiceOnly && {
+ // collection_method: "send_invoice",
+ // days_until_due: 30,
+ // }) as any),
+ // });
+ // Line up with first sub
+
+ // 1. Next billing date for first sub
+ const nextCycleAnchor = firstSub.current_period_end * 1000;
+ let nextCycleAnchorUnix = nextCycleAnchor;
+ const naturalBillingDate = addBillingIntervalUnix(
+ Date.now(),
+ itemSet.interval
+ );
+
+ while (true) {
+ const subtractedUnix = subtractBillingIntervalUnix(
+ nextCycleAnchorUnix,
+ itemSet.interval
+ );
+
+ if (subtractedUnix < Date.now()) {
+ break;
+ }
+
+ nextCycleAnchorUnix = subtractedUnix;
+ }
greptile
logic: Potential infinite loop if subtractBillingIntervalUnix keeps returning values above Date.now()
suggested fix
+ let maxIterations = 100; // Prevent infinite loops
while (true) {
+ if (maxIterations-- <= 0) {
break;
}
const subtractedUnix = subtractBillingIntervalUnix(
nextCycleAnchorUnix,
itemSet.interval
);
if (subtractedUnix < Date.now()) {
break;
}
nextCycleAnchorUnix = subtractedUnix;
}
diff block
for {
line, err := reader.ReadString('\n')
if err != nil {
- if err == io.EOF {
- break
+ if err != io.EOF {
+ return nil, err
}
- return nil, err
}
greptile
logic: EOF handling could cause an infinite loop - need to break or return when EOF is encountered
suggested fix
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
return nil, err
+ }
diff block
logger.warning("Unexpected credentials provided for Web Connector")
return None
+ def _do_scrape(
+ self, index: int, initial_url: str, scrape_context: ScrapeContext
+ ) -> bool:
+ """returns False if the caller should continue (usually due to skipping duplicates),
+ True if the page scraped normally"""
+
+ if scrape_context.playwright is None:
+ raise RuntimeError("scrape_context.playwright is None")
+
+ if scrape_context.playwright_context is None:
+ raise RuntimeError("scrape_context.playwright_context is None")
+
+ if scrape_context.retry_count > 0:
+ # Add a random delay between retries (exponential backoff)
+ delay = min(2**scrape_context.retry_count + random.uniform(0, 1), 10)
+ logger.info(
+ f"Retry {scrape_context.retry_count}/{self.MAX_RETRIES} for {initial_url} after {delay:.2f}s delay"
+ )
+ time.sleep(delay)
+
+ if scrape_context.restart_playwright:
+ scrape_context.playwright, scrape_context.playwright_context = (
+ start_playwright()
+ )
+ scrape_context.restart_playwright = False
+
+ # Handle cookies for the URL
+ _handle_cookies(scrape_context.playwright_context, initial_url)
+
+ # First do a HEAD request to check content type without downloading the entire content
+ head_response = requests.head(
+ initial_url, headers=DEFAULT_HEADERS, allow_redirects=True
+ )
+ is_pdf = is_pdf_content(head_response)
+
+ if is_pdf or initial_url.lower().endswith(".pdf"):
+ # PDF files are not checked for links
+ response = requests.get(initial_url, headers=DEFAULT_HEADERS)
+ page_text, metadata, images = read_pdf_file(
+ file=io.BytesIO(response.content)
+ )
+ last_modified = response.headers.get("Last-Modified")
+
+ scrape_context.doc_batch.append(
+ Document(
+ id=initial_url,
+ sections=[TextSection(link=initial_url, text=page_text)],
+ source=DocumentSource.WEB,
+ semantic_identifier=initial_url.split("/")[-1],
+ metadata=metadata,
+ doc_updated_at=(
+ _get_datetime_from_last_modified_header(last_modified)
+ if last_modified
+ else None
+ ),
+ )
+ )
+ scrape_context.retry_success = True
+ return False
+
+ page = scrape_context.playwright_context.new_page()
+
+ if self.add_randomness:
+ # Add random mouse movements and scrolling to mimic human behavior
+ page.mouse.move(random.randint(100, 700), random.randint(100, 500))
+
+ # Can't use wait_until="networkidle" because it interferes with the scrolling behavior
+ page_response = page.goto(
+ initial_url,
+ timeout=30000, # 30 seconds
+ wait_until="domcontentloaded", # Wait for DOM to be ready
+ )
+
+ # Add a small random delay to mimic human behavior
+ time.sleep(random.uniform(0.5, 2.0))
+
+ last_modified = (
+ page_response.header_value("Last-Modified") if page_response else None
+ )
+ final_url = page.url
+ if final_url != initial_url:
+ protected_url_check(final_url)
+ initial_url = final_url
+ if initial_url in scrape_context.visited_links:
+ logger.info(
+ f"{index}: {initial_url} redirected to {final_url} - already indexed"
+ )
+ page.close()
+ scrape_context.retry_success = True
+ return False
+
+ logger.info(f"{index}: {initial_url} redirected to {final_url}")
+ scrape_context.visited_links.add(initial_url)
+
+ # If we got here, the request was successful
+ scrape_context.retry_success = True
+
+ if self.scroll_before_scraping:
+ scroll_attempts = 0
+ previous_height = page.evaluate("document.body.scrollHeight")
+ while scroll_attempts < WEB_CONNECTOR_MAX_SCROLL_ATTEMPTS:
+ page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
+ page.wait_for_load_state("networkidle", timeout=30000)
+ new_height = page.evaluate("document.body.scrollHeight")
+ if new_height == previous_height:
+ break # Stop scrolling when no more content is loaded
+ previous_height = new_height
+ scroll_attempts += 1
+
+ content = page.content()
+ soup = BeautifulSoup(content, "html.parser")
+
+ if self.recursive:
+ internal_links = get_internal_links(
+ scrape_context.base_url, initial_url, soup
+ )
+ for link in internal_links:
+ if link not in scrape_context.visited_links:
+ scrape_context.to_visit.append(link)
+
+ if page_response and str(page_response.status)[0] in ("4", "5"):
+ scrape_context.last_error = f"Skipped indexing {initial_url} due to HTTP {page_response.status} response"
+ logger.info(scrape_context.last_error)
+ return False
+
+ parsed_html = web_html_cleanup(soup, self.mintlify_cleanup)
+
+ """For websites containing iframes that need to be scraped,
+ the code below can extract text from within these iframes.
+ """
+ logger.debug(f"{index}: Length of cleaned text {len(parsed_html.cleaned_text)}")
+ if JAVASCRIPT_DISABLED_MESSAGE in parsed_html.cleaned_text:
+ iframe_count = page.frame_locator("iframe").locator("html").count()
+ if iframe_count > 0:
+ iframe_texts = (
+ page.frame_locator("iframe").locator("html").all_inner_texts()
+ )
+ document_text = "\n".join(iframe_texts)
+ """ 700 is the threshold value for the length of the text extracted
+ from the iframe based on the issue faced """
+ if len(parsed_html.cleaned_text) < IFRAME_TEXT_LENGTH_THRESHOLD:
+ parsed_html.cleaned_text = document_text
+ else:
+ parsed_html.cleaned_text += "\n" + document_text
+
+ # Sometimes pages with #! will serve duplicate content
+ # There are also just other ways this can happen
+ hashed_text = hash((parsed_html.title, parsed_html.cleaned_text))
+ if hashed_text in scrape_context.content_hashes:
+ logger.info(
+ f"{index}: Skipping duplicate title + content for {initial_url}"
+ )
+ return False
+
+ scrape_context.content_hashes.add(hashed_text)
+
+ scrape_context.doc_batch.append(
+ Document(
+ id=initial_url,
+ sections=[TextSection(link=initial_url, text=parsed_html.cleaned_text)],
+ source=DocumentSource.WEB,
+ semantic_identifier=parsed_html.title or initial_url,
+ metadata={},
+ doc_updated_at=(
+ _get_datetime_from_last_modified_header(last_modified)
+ if last_modified
+ else None
+ ),
+ )
+ )
+
+ page.close()
+ return True
+
def load_from_state(self) -> GenerateDocumentsOutput:
"""Traverses through all pages found on the website
and converts them into documents"""
- visited_links: set[str] = set()
- to_visit: list[str] = self.to_visit_list
- content_hashes = set()
- if not to_visit:
+ if not self.to_visit_list:
raise ValueError("No URLs to visit")
- base_url = to_visit[0] # For the recursive case
- doc_batch: list[Document] = []
+ base_url = self.to_visit_list[0] # For the recursive case
+ check_internet_connection(base_url) # make sure we can connect to the base url
- # make sure we can connect to the base url
- check_internet_connection(base_url)
+ scrape_context = ScrapeContext(base_url=base_url, to_visit=self.to_visit_list)
- # Needed to report error
- at_least_one_doc = False
- last_error = None
+ scrape_context.playwright, scrape_context.playwright_context = (
+ start_playwright()
+ )
- playwright, context = start_playwright()
- restart_playwright = False
- while to_visit:
- initial_url = to_visit.pop()
- if initial_url in visited_links:
+ while scrape_context.to_visit:
+ initial_url = scrape_context.to_visit.pop()
+ if initial_url in scrape_context.visited_links:
continue
- visited_links.add(initial_url)
+ scrape_context.visited_links.add(initial_url)
try:
protected_url_check(initial_url)
except Exception as e:
- last_error = f"Invalid URL {initial_url} due to {e}"
- logger.warning(last_error)
+ scrape_context.last_error = f"Invalid URL {initial_url} due to {e}"
+ logger.warning(scrape_context.last_error)
continue
- index = len(visited_links)
+ index = len(scrape_context.visited_links)
logger.info(f"{index}: Visiting {initial_url}")
# Add retry mechanism with exponential backoff
- max_retries = 3
- retry_count = 0
- retry_success = False
+ scrape_context.retry_count = 0
+ scrape_context.retry_success = False
- while retry_count < max_retries and not retry_success:
+ while (
+ scrape_context.retry_count < self.MAX_RETRIES
+ and not scrape_context.retry_success
+ ):
try:
- if retry_count > 0:
- # Add a random delay between retries (exponential backoff)
- delay = min(2**retry_count + random.uniform(0, 1), 10)
- logger.info(
- f"Retry {retry_count}/{max_retries} for {initial_url} after {delay:.2f}s delay"
- )
- time.sleep(delay)
-
- if restart_playwright:
- playwright, context = start_playwright()
- restart_playwright = False
-
- # Handle cookies for the URL
- _handle_cookies(context, initial_url)
-
- # First do a HEAD request to check content type without downloading the entire content
- head_response = requests.head(
- initial_url, headers=DEFAULT_HEADERS, allow_redirects=True
- )
- is_pdf = is_pdf_content(head_response)
-
- if is_pdf or initial_url.lower().endswith(".pdf"):
- # PDF files are not checked for links
- response = requests.get(initial_url, headers=DEFAULT_HEADERS)
- page_text, metadata, images = read_pdf_file(
- file=io.BytesIO(response.content)
- )
- last_modified = response.headers.get("Last-Modified")
-
- doc_batch.append(
- Document(
- id=initial_url,
- sections=[
- TextSection(link=initial_url, text=page_text)
- ],
- source=DocumentSource.WEB,
- semantic_identifier=initial_url.split("/")[-1],
- metadata=metadata,
- doc_updated_at=(
- _get_datetime_from_last_modified_header(
- last_modified
- )
- if last_modified
- else None
- ),
- )
- )
- retry_success = True
- continue
-
- page = context.new_page()
-
- if self.add_randomness:
- # Add random mouse movements and scrolling to mimic human behavior
- page.mouse.move(
- random.randint(100, 700), random.randint(100, 500)
- )
-
- # Can't use wait_until="networkidle" because it interferes with the scrolling behavior
- page_response = page.goto(
- initial_url,
- timeout=30000, # 30 seconds
- wait_until="domcontentloaded", # Wait for DOM to be ready
- )
-
- # Add a small random delay to mimic human behavior
- time.sleep(random.uniform(0.5, 2.0))
-
- # Check if we got a 403 error
- if page_response and page_response.status == 403:
- logger.warning(
- f"Received 403 Forbidden for {initial_url}, retrying..."
- )
- page.close()
- retry_count += 1
+ normal_scrape = self._do_scrape(index, initial_url, scrape_context)
+ if not normal_scrape:
continue
greptile
logic: This continue statement skips incrementing retry_count and restarting playwright, potentially causing an infinite loop if normal_scrape is consistently False
suggested fix
if not normal_scrape:
+ scrape_context.retry_success = True # Mark as success since we handled it
+ break # Exit retry loop since this was handled normally
diff block
throw error;
}
}
+
+export async function deleteNextJsPage(projectRoot: string, pagePath: string, isDir: boolean) {
+ try {
+ const routerConfig = await detectRouterType(projectRoot);
+
+ if (!routerConfig) {
+ throw new Error('Could not detect Next.js router type');
+ }
+
+ if (routerConfig.type !== 'app') {
+ throw new Error('Page deletion is only supported for App Router projects for now.');
+ }
+
+ const fullPath = path.join(routerConfig.basePath, pagePath);
+
+ // Check if file/folder exists
+ let stats;
+ try {
+ stats = await fs.stat(fullPath);
+ } catch (err: any) {
+ if (err.code === 'ENOENT') {
+ throw new Error('Selected page not found');
+ }
+ throw err;
+ }
+
+ if (isDir) {
+ await fs.rm(fullPath, { recursive: true, force: true });
+ } else {
+ const selectedFilePath = path.join(fullPath, 'page.tsx');
+ await fs.unlink(selectedFilePath);
+ await cleanupEmptyFolders(path.dirname(fullPath));
+ }
+
+ console.log(`Deleted: ${fullPath}`);
+ return true;
+ } catch (error) {
+ console.error('Error deleting page:', error);
+ throw error;
+ }
+}
+
+async function cleanupEmptyFolders(folderPath: string) {
+ while (folderPath !== path.dirname(folderPath)) {
greptile
logic: potential infinite loop if path.dirname(folderPath) equals folderPath but isn't the project root
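A bounded sketch of the upward walk: stopping at an explicit boundary is an added assumption (the reviewed code only compares against the filesystem root), and it reuses the same fs/promises and path imports as the file above.

async function cleanupEmptyFoldersBounded(folderPath: string, stopAt: string): Promise<void> {
  let current = folderPath;
  while (current !== stopAt && current !== path.dirname(current)) {
    const entries = await fs.readdir(current);
    if (entries.length > 0) {
      break; // folder is not empty, nothing more to clean up
    }
    await fs.rmdir(current); // only removes empty directories
    current = path.dirname(current);
  }
}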
diff block
DispatchQueue.main.asyncAfter(deadline: .now() + Config.debounceInterval, execute: workItem)
}
+
+ // MARK: - Window resize / move
+
+ private func handleWindowBounds(for element: AXUIElement) {
+ handleExternalElement(element) { [weak self] elementPid in
+ if let window = self?.findWindow(from: element) {
+ self?.activeWindowElement = window
+ }
+ }
+ }
+
+ private func findWindow(from element: AXUIElement) -> AXUIElement? {
+ if let role = element.role(), role == kAXWindowRole {
+ return element
+ }
+
+ var currentElement = element
+ while true {
+ guard let parent = currentElement.parent() else { break }
+
+ if let role = parent.role(), role == kAXWindowRole {
+ return parent
+ }
+
+ currentElement = parent
+ }
+
+ var value: AnyObject?
+ let result = AXUIElementCopyAttributeValue(currentElement, kAXWindowsAttribute as CFString, &value)
+
+ if result == .success,
+ let windows = value as? [AXUIElement],
+ let window = windows.first {
+ return window
+ }
+
+ return nil
+ }
greptile
logic: The findWindow method could enter an infinite loop if there's a circular parent reference in the accessibility hierarchy. Consider adding a max depth limit.
diff block
+import { RivetClient } from "@rivet-gg/api";
+import WebSocket from 'ws';
+import fetch from 'node-fetch';
+
+// Can be opt since they're not required for dev
+const RIVET_ENDPOINT = process.env.RIVET_ENDPOINT;
+const RIVET_SERVICE_TOKEN = process.env.RIVET_SERVICE_TOKEN;
+const RIVET_PROJECT = process.env.RIVET_PROJECT;
+const RIVET_ENVIRONMENT = process.env.RIVET_ENVIRONMENT;
+
+let region = process.env.REGION;
+if (!region || region.length === 0) {
+ region = undefined;
+}
+
+const client = new RivetClient({
+ environment: RIVET_ENDPOINT,
+ token: RIVET_SERVICE_TOKEN,
+});
+
+async function run() {
+ let actorId: string | undefined;
+ try {
+ console.log("Creating actor", { region });
+ const { actor } = await client.actor.create({
+ project: RIVET_PROJECT,
+ environment: RIVET_ENVIRONMENT,
+ body: {
+ region,
+ tags: {
+ name: "ws",
+ },
+ buildTags: { name: "ws", current: "true" },
+ network: {
+ ports: {
+ http: {
+ protocol: "https",
+ // internalPort: 80,
+ routing: {
+ guard: {},
+ },
+ },
+ },
+ },
+ lifecycle: {
+ durable: false,
+ },
+ },
+ });
+ actorId = actor.id;
+
+ const port = actor.network.ports.http;
+ if (!port) {
+ throw new Error("missing port http");
+ }
+
+ const actorOrigin = `${port.protocol}://${port.hostname}:${port.port}${port.path ?? ""}`;
+ console.log("Created actor at", actorOrigin);
+
+ // Retry loop for HTTP health check
+ console.time(`ready-${actorId}`);
+ while (true) {
+ try {
+ const response = await fetch(`${actorOrigin}/health`);
+ if (response.ok) {
+ console.log("Health check passed");
+ console.timeEnd(`ready-${actorId}`);
+ break;
+ } else {
+ console.error(
+ `Health check failed with status: ${response.status}, retrying...`,
+ );
+ }
+ } catch (error) {
+ console.error("Health check request error:", error);
+ }
+ await new Promise((resolve) => setTimeout(resolve, 100));
+ }
greptile
logic: Health check retry loop needs a timeout or maximum retry count to prevent infinite loops in case of persistent failures
suggested fix
+ let attempts = 0;
+ const MAX_ATTEMPTS = 50; // 5 seconds total with 100ms delay
+ while (attempts < MAX_ATTEMPTS) {
try {
const response = await fetch(`${actorOrigin}/health`);
if (response.ok) {
console.log("Health check passed");
console.timeEnd(`ready-${actorId}`);
break;
} else {
console.error(
`Health check failed with status: ${response.status}, retrying...`,
);
}
} catch (error) {
console.error("Health check request error:", error);
}
+ attempts++;
await new Promise((resolve) => setTimeout(resolve, 100));
}
+ if (attempts >= MAX_ATTEMPTS) {
+ throw new Error("Health check timed out after 5 seconds");
}
diff block
+import fs from "fs";
+import { readFile } from "fs";
+import path from "path";
+import { promisify } from "util";
+
+import { environment } from "@raycast/api";
+import { useCachedPromise, useCachedState } from "@raycast/utils";
+import ini from "ini";
+import { useMemo, useEffect } from "react";
+import initSqlJs, { Database } from "sql.js";
+
+import { BROWSERS_BUNDLE_ID } from "./useAvailableBrowsers";
+
+const read = promisify(readFile);
+
+const ZEN_FOLDER = path.join(process.env.HOME || "", "Library", "Application Support", "zen");
+
+const folderNames: Record<string, string> = {
+ menu: "Bookmark Menu",
+ mobile: "Mobile Bookmarks",
+ tags: "Tags",
+ toolbar: "Toolbar",
+ unfiled: "Other Bookmarks",
+};
+
+async function getZenProfiles() {
+ if (!fs.existsSync(`${ZEN_FOLDER}/profiles.ini`)) {
+ return { profiles: [], defaultProfile: "" };
+ }
+
+ const file = await read(`${ZEN_FOLDER}/profiles.ini`, "utf-8");
+ const iniFile = ini.parse(file);
+
+ const profiles = Object.keys(iniFile)
+ .filter((key) => {
+ if (key.startsWith("Profile")) {
+ const profilePath = iniFile[key].Path;
+ const fullProfilePath = path.join(ZEN_FOLDER, profilePath);
+ return fs.existsSync(path.join(fullProfilePath, "places.sqlite"));
+ }
+ return false;
+ })
+ .map((key) => ({
+ name: iniFile[key].Name || iniFile[key].Path,
+ path: iniFile[key].Path,
+ isDefault: iniFile[key].Default === "1" || iniFile[key].Path.includes(".Default (alpha)"),
+ }));
+
+ let defaultProfile = profiles.find((p) => p.isDefault)?.path;
+ if (!defaultProfile && profiles.length > 0) {
+ defaultProfile = profiles[0].path;
+ }
+
+ profiles.sort((a, b) => {
+ if (a.path === defaultProfile) return -1;
+ if (b.path === defaultProfile) return 1;
+ return a.name.localeCompare(b.name);
+ });
+
+ return { profiles, defaultProfile };
+}
+
+type Folder = {
+ id: number;
+ parentId: number;
+ title: string;
+ guid: string;
+};
+
+function getZenFolders(db: Database) {
+ const folders = [];
+ const statement = db.prepare(
+ `
+ SELECT moz_bookmarks.id AS id,
+ moz_bookmarks.parent AS parentId,
+ moz_bookmarks.title AS title,
+ moz_bookmarks.guid AS guid
+ FROM moz_bookmarks
+ WHERE moz_bookmarks.type = 2
+ AND moz_bookmarks.title IS NOT NULL
+ AND moz_bookmarks.title <> ''
+ AND moz_bookmarks.fk IS NULL;
+ `,
+ );
+
+ while (statement.step()) {
+ const row = statement.getAsObject() as Folder;
+ folders.push(row);
+ }
+
+ statement.free();
+ return folders;
+}
+
+type Bookmark = {
+ id: number;
+ parentId: number;
+ title: string;
+ urlString: string;
+};
+
+function getZenBookmarks(db: Database) {
+ const bookmarks = [];
+ const statement = db.prepare(
+ `
+ SELECT moz_places.id AS id,
+ moz_bookmarks.parent AS parentId,
+ moz_bookmarks.title AS title,
+ moz_places.url AS urlString
+ FROM moz_bookmarks LEFT JOIN moz_places ON moz_bookmarks.fk = moz_places.id
+ WHERE moz_bookmarks.type = 1
+ AND moz_bookmarks.title IS NOT NULL
+ AND moz_places.url IS NOT NULL;
+ `,
+ );
+
+ while (statement.step()) {
+ const row = statement.getAsObject() as Bookmark;
+ bookmarks.push(row);
+ }
+
+ statement.free();
+ return bookmarks;
+}
+
+function processFolderHierarchy(folders: Folder[]): Folder[] {
+ const processedFolders = [...folders];
+
+ // Find the toolbar folder ID
+ const toolbarFolder = processedFolders.find((f) => f.parentId === 1 && f.title.toLowerCase() === "toolbar");
+ const toolbarId = toolbarFolder?.id;
+
+ return processedFolders.map((folder) => {
+ // For root-level folders, use friendly names
+ if (folder.parentId === 1) {
+ const friendlyName = folderNames[folder.title.toLowerCase()];
+ return {
+ ...folder,
+ title: friendlyName || folder.title,
+ };
+ }
+
+ // Build hierarchy for non-root folders
+ const hierarchy = [folder.title];
+ let currentFolder = folder;
+
+ while (currentFolder.parentId !== 1) {
+ const parent = processedFolders.find((f) => f.id === currentFolder.parentId);
+ if (!parent) break;
+
greptile
logic: Potential infinite loop if folder structure has circular references
suggested fix
+ const visited = new Set();
while (currentFolder.parentId !== 1) {
+ if (visited.has(currentFolder.id)) break;
+ visited.add(currentFolder.id);
const parent = processedFolders.find((f) => f.id === currentFolder.parentId);
if (!parent) break;
diff block
+import { mergeAttributes, Node } from '@tiptap/core'
+import { Fragment, Node as PMNode, Slice } from 'prosemirror-model'
+import { EditorState, NodeSelection, Plugin, PluginKey, TextSelection } from 'prosemirror-state'
+import { Decoration, DecorationSet } from 'prosemirror-view'
+import { blockToNode, inlineContentToNodes } from '../../../api/nodeConversions/nodeConversions'
+
+import { BlockChildrenType } from '../api/blockTypes'
+import { ResolvedPos } from '@tiptap/pm/model'
+import { EditorView } from '@tiptap/pm/view'
+import { mergeCSSClasses } from '../../../shared/utils'
+import { BlockNoteDOMAttributes, BlockSchema, PartialBlock } from '../api/blockTypes'
+import { getBlockInfoFromPos } from '../helpers/getBlockInfoFromPos'
+import { getGroupInfoFromPos } from '../helpers/getGroupInfoFromPos'
+import styles from './Block.module.css'
+import BlockAttributes from './BlockAttributes'
+
+const SelectionPluginKey = new PluginKey('selectionPluginKey')
+const ClickSelectionPluginKey = new PluginKey('clickSelectionPluginKey')
+const PastePluginKey = new PluginKey('pastePluginKey')
+const headingLinePluginKey = new PluginKey('HeadingLinePlugin')
+
+const SelectionPlugin = new Plugin({
+ key: SelectionPluginKey,
+ state: {
+ init() {
+ return DecorationSet.empty
+ },
+ apply(tr, oldState) {
+ return tr.getMeta(SelectionPluginKey) || oldState
+ },
+ },
+ props: {
+ decorations(state) {
+ return this.getState(state)
+ },
+ },
+})
+
+const ClickSelectionPlugin = new Plugin({
+ key: ClickSelectionPluginKey,
+ props: {
+ handleDOMEvents: {
+ mousedown(view, event) {
+ if (event.shiftKey && event.button === 0) {
+ const { state } = view
+ const editorBoundingBox = (view.dom.firstChild! as HTMLElement).getBoundingClientRect()
+ const coords = {
+ left: editorBoundingBox.left + editorBoundingBox.width / 2, // take middle of editor
+ top: event.clientY,
+ }
+ let pos = view.posAtCoords(coords)
+ if (!pos) {
+ return undefined
+ }
+ const { selection } = state
+ const selectedPos = state.doc.resolve(selection.from)
+ const nodePos = state.doc.resolve(pos.pos)
+ if (selectedPos.start() === selection.from && pos.pos === nodePos.end()) {
+ const decoration = Decoration.widget(nodePos.pos, () => {
+ const span = document.createElement('span')
+ span.style.backgroundColor = 'blue'
+ span.style.width = '10px'
+ span.style.height = '10px'
+ return span
+ })
+ const decorationSet = DecorationSet.create(state.doc, [decoration])
+ view.dispatch(state.tr.setMeta(SelectionPluginKey, decorationSet))
+ }
+ return false
+ }
+ return false
+ },
+ },
+ },
+})
+
+const PastePlugin = new Plugin({
+ key: PastePluginKey,
+ props: {
+ handlePaste: (view, event) => {
+ if (!event.clipboardData) {
+ return false
+ }
+
+ const { state } = view
+ let { tr } = state
+ const { selection } = state
+ const { $from, $to } = selection
+
+ const targetNode = state.doc.resolve($from.pos).parent
+
+ if (targetNode.type.name === 'image') {
+ tr = tr.insertText(event.clipboardData.getData('text/plain'), $from.pos, $to.pos)
+ view.dispatch(tr)
+ return true
+ }
+
+ return false
+ },
+ },
+})
+
+const headingLinePlugin = new Plugin({
+ key: headingLinePluginKey,
+ view(editorView) {
+ return new HeadingLinePlugin(editorView)
+ },
+})
+
+class HeadingLinePlugin {
+ private line: HTMLElement
+ constructor(view: EditorView) {
+ this.line = document.createElement('div')
+ this.line.style.transition = 'all 0.15s ease-in-out'
+ this.line.style.pointerEvents = 'none'
+ this.line.style.display = ''
+ this.line.style.opacity = '0'
+ view.dom.parentNode?.appendChild(this.line)
+
+ this.update(view, null)
+ }
+
+ update(view: EditorView, lastState: EditorState | null) {
+ let state = view.state
+ // Don't do anything if the document/selection didn't change
+ if (lastState && lastState.doc.eq(state.doc) && lastState.selection.eq(state.selection)) return
+
+ let res = getNearestHeadingFromPos(state, state.selection.from)
+
+ if (res && res.heading?.type.name === 'heading') {
+ let { node } = view.domAtPos(res.groupStartPos)
+
+ let rect = (node as HTMLElement).getBoundingClientRect()
+ let editorRect = view.dom.getBoundingClientRect()
+ let groupPadding = 10
+ let editorPaddingTop = 40
+ this.line.style.position = 'absolute'
+ this.line.style.top = `${rect.top + editorPaddingTop + groupPadding - editorRect.top}px`
+ this.line.style.left = `${rect.left - editorRect.left + groupPadding}px`
+ this.line.style.width = `2.5px`
+ this.line.style.height = `${rect.height - groupPadding * 2}px`
+ this.line.style.backgroundColor = 'var(--brand5)'
+ this.line.style.opacity = '0.4'
+ } else {
+ this.line.style.opacity = '0'
+ return
+ }
+ }
+
+ destroy() {
+ this.line.remove()
+ }
+}
+
+function getNearestHeadingFromPos(state: EditorState, pos: number) {
+ const $pos = state.doc.resolve(pos)
+ const maxDepth = $pos.depth
+ let group = $pos.node(maxDepth)
+ let heading = group.firstChild
+ let depth = maxDepth
+
+ if (maxDepth > 3) {
+ while (true) {
+ if (depth < 0) {
+ break
+ }
+
+ if (group.type.name === 'blockContainer' && heading?.type.name === 'heading') {
+ break
+ }
+
+ depth -= 1
+ group = $pos.node(depth)
+ heading = group.firstChild
+ }
+ return {
+ depth,
+ groupStartPos: $pos.start(depth),
+ heading,
+ group,
+ $pos,
+ }
+ }
+
+ return
+}
+
+export function getParentBlockFromPos(state: EditorState, pos: number) {
+ const $pos = state.doc.resolve(pos)
+ const depth = $pos.depth
+
+ // if (depth > 3 && container.type.name == 'blockContainer') {
+ if (depth > 3) {
+ let parent = $pos.node(depth - 3)
+ let parentGroup = $pos.node(depth - 2)
+ let parentPos = $pos.start(depth - 3)
+ return {
+ parentGroup,
+ parentBlock: parent.firstChild,
+ parentPos,
+ depth,
+ $pos,
+ }
+ }
+
+ return
+}
+declare module '@tiptap/core' {
+ interface Commands<ReturnType> {
+ block: {
+ BNCreateBlock: (pos: number) => ReturnType
+ BNDeleteBlock: (posInBlock: number) => ReturnType
+ BNMergeBlocks: (posBetweenBlocks: number) => ReturnType
+ BNSplitBlock: (posInBlock: number, keepType: boolean) => ReturnType
+ BNSplitHeadingBlock: (posInBlock: number) => ReturnType
+ BNUpdateBlock: <BSchema extends BlockSchema>(posInBlock: number, block: PartialBlock<BSchema>) => ReturnType
+ BNCreateOrUpdateBlock: <BSchema extends BlockSchema>(
+ posInBlock: number,
+ block: PartialBlock<BSchema>,
+ ) => ReturnType
+ UpdateGroupChildren: (
+ group: PMNode,
+ groupPos: ResolvedPos,
+ groupLevel: number,
+ listType: BlockChildrenType,
+ indent: number,
+ ) => ReturnType
+ UpdateGroup: (
+ posInBlock: number,
+ listType: BlockChildrenType,
+ tab: boolean,
+ // start?: string,
+ isSank?: boolean,
+ turnInto?: boolean,
+ ) => ReturnType
+ }
+ }
+}
+
+/**
+ * The main "Block node" documents consist of
+ */
+export const BlockContainer = Node.create<{
+ domAttributes?: BlockNoteDOMAttributes
+}>({
+ name: 'blockContainer',
+ group: 'blockContainer',
+ // A block always contains content, and optionally a blockGroup which contains nested blocks
+ content: 'blockContent blockGroup?',
+ // Ensures content-specific keyboard handlers trigger first.
+ priority: 50,
+ defining: true,
+
+ parseHTML() {
+ return [
+ {
+ tag: 'div',
+ getAttrs: (element) => {
+ if (typeof element === 'string') {
+ return false
+ }
+
+ const attrs: Record<string, string> = {}
+ for (const [nodeAttr, HTMLAttr] of Object.entries(BlockAttributes)) {
+ if (element.getAttribute(HTMLAttr)) {
+ attrs[nodeAttr] = element.getAttribute(HTMLAttr)!
+ }
+ }
+
+ if (element.getAttribute('data-node-type') === 'blockContainer') {
+ return attrs
+ }
+
+ return false
+ },
+ },
+ ]
+ },
+
+ renderHTML({ HTMLAttributes }) {
+ const domAttributes = this.options.domAttributes?.blockContainer || {}
+
+ return [
+ 'div',
+ mergeAttributes(HTMLAttributes, {
+ class: styles.blockOuter,
+ 'data-node-type': 'block-outer',
+ }),
+ [
+ 'div',
+ mergeAttributes(
+ {
+ ...domAttributes,
+ class: mergeCSSClasses(styles.block, domAttributes.class),
+ 'data-node-type': this.name,
+ },
+ HTMLAttributes,
+ ),
+ 0,
+ ],
+ ]
+ },
+
+ addCommands() {
+ return {
+ // Creates a new text block at a given position.
+ BNCreateBlock:
+ (pos) =>
+ ({ state, dispatch }) => {
+ const newBlock = state.schema.nodes['blockContainer'].createAndFill()!
+
+ if (dispatch) {
+ state.tr.insert(pos, newBlock)
+ }
+
+ return true
+ },
+ // Deletes a block at a given position.
+ BNDeleteBlock:
+ (posInBlock) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { startPos, endPos } = blockInfo
+
+ if (dispatch) {
+ state.tr.deleteRange(startPos, endPos)
+ }
+
+ return true
+ },
+ // Updates a block at a given position.
+ BNUpdateBlock:
+ (posInBlock, block) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { startPos, endPos, node, contentNode } = blockInfo
+
+ if (dispatch) {
+ // Adds blockGroup node with child blocks if necessary.
+ if (block.children !== undefined && block.children.length > 0) {
+ const childNodes = []
+
+ // Creates ProseMirror nodes for each child block, including their descendants.
+ for (const child of block.children) {
+ childNodes.push(blockToNode(child, state.schema))
+ }
+
+ // Checks if a blockGroup node already exists.
+ if (node.childCount === 2) {
+ // Replaces all child nodes in the existing blockGroup with the ones created earlier.
+ state.tr.replace(
+ startPos + contentNode.nodeSize + 1,
+ endPos - 1,
+ new Slice(Fragment.from(childNodes), 0, 0),
+ )
+ } else {
+ // Inserts a new blockGroup containing the child nodes created earlier.
+ state.tr.insert(
+ startPos + contentNode.nodeSize,
+ state.schema.nodes['blockGroup'].create({}, childNodes),
+ )
+ }
+ }
+
+ // Replaces the blockContent node's content if necessary.
+ if (block.content !== undefined) {
+ let content: PMNode[] = []
+
+ // Checks if the provided content is a string or InlineContent[] type.
+ if (typeof block.content === 'string') {
+ // Adds a single text node with no marks to the content.
+ content.push(state.schema.text(block.content))
+ } else {
+ // Adds a text node with the provided styles converted into marks to the content, for each InlineContent
+ // object.
+ content = inlineContentToNodes(block.content, state.schema)
+ }
+
+ // Replaces the contents of the blockContent node with the previously created text node(s).
+ state.tr.replace(
+ startPos + 1,
+ startPos + contentNode.nodeSize - 1,
+ new Slice(Fragment.from(content), 0, 0),
+ )
+ }
+
+ // Changes the blockContent node type and adds the provided props as attributes. Also preserves all existing
+ // attributes that are compatible with the new type.
+ state.tr.setNodeMarkup(startPos, block.type === undefined ? undefined : state.schema.nodes[block.type], {
+ ...contentNode.attrs,
+ ...block.props,
+ })
+
+ // Adds all provided props as attributes to the parent blockContainer node too, and also preserves existing
+ // attributes.
+ let providedProps = {
+ ...node.attrs,
+ ...block.props,
+ }
+ state.tr.setNodeMarkup(startPos - 1, undefined, providedProps)
+ }
+
+ return true
+ },
+ // Appends the text contents of a block to the nearest previous block, given a position between them. Children of
+ // the merged block are moved out of it first, rather than also being merged.
+ //
+ // In the example below, the position passed into the function is between Block1 and Block2.
+ //
+ // Block1
+ // Block2
+ // Block3
+ // Block4
+ // Block5
+ //
+ // Becomes:
+ //
+ // Block1
+ // Block2Block3
+ // Block4
+ // Block5
+ BNMergeBlocks:
+ (posBetweenBlocks) =>
+ ({ state, dispatch }) => {
+ const nextNodeIsBlock = state.doc.resolve(posBetweenBlocks + 1).node().type.name === 'blockContainer'
+ const prevNodeIsBlock = state.doc.resolve(posBetweenBlocks - 1).node().type.name === 'blockContainer'
+
+ if (!nextNodeIsBlock || !prevNodeIsBlock) {
+ return false
+ }
+
+ const nextBlockInfo = getBlockInfoFromPos(state.doc, posBetweenBlocks + 1)
+
+ const { node, contentNode, startPos, endPos, depth } = nextBlockInfo!
+
+ // Removes a level of nesting all children of the next block by 1 level, if it contains both content and block
+ // group nodes.
+ if (node.childCount === 2) {
+ const childBlocksStart = state.doc.resolve(startPos + contentNode.nodeSize + 1)
+ const childBlocksEnd = state.doc.resolve(endPos - 1)
+ const childBlocksRange = childBlocksStart.blockRange(childBlocksEnd)
+
+ // Moves the block group node inside the block into the block group node that the current block is in.
+ if (dispatch) {
+ state.tr.lift(childBlocksRange!, depth - 1)
+ }
+ }
+
+ let prevBlockEndPos = posBetweenBlocks - 1
+ let prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ // Finds the nearest previous block, regardless of nesting level.
+ while (prevBlockInfo!.numChildBlocks > 0) {
+ prevBlockEndPos--
+ prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+ if (prevBlockInfo === undefined) {
+ return false
+ }
+ }
+
+ // Deletes next block and adds its text content to the nearest previous block.
+
+ if (dispatch) {
+ dispatch(
+ state.tr
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ .replace(prevBlockEndPos - 1, startPos, new Slice(contentNode.content, 0, 0))
+ .scrollIntoView(),
+ )
+
+ state.tr.setSelection(new TextSelection(state.doc.resolve(prevBlockEndPos - 1)))
+ }
+
+ return true
+ },
+ // Splits a block at a given position. Content after the position is moved to a new block below, at the same
+ // nesting level.
+ BNSplitBlock:
+ (posInBlock, keepType) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { contentNode, contentType, startPos, endPos, depth } = blockInfo
+
+ const originalBlockContent = state.doc.cut(startPos + 1, posInBlock)
+ const newBlockContent = state.doc.cut(posInBlock, endPos - 1)
+
+ const newBlock = state.schema.nodes['blockContainer'].createAndFill()!
+
+ const newBlockInsertionPos = endPos + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ if (dispatch) {
+ // Creates a new block. Since the schema requires it to have a content node, a paragraph node is created
+ // automatically, spanning newBlockContentPos to newBlockContentPos + 1.
+ state.tr.insert(newBlockInsertionPos, newBlock)
+
+ // Replaces the content of the newly created block's content node. Doesn't replace the whole content node so
+ // its type doesn't change.
+ state.tr.replace(
+ newBlockContentPos,
+ newBlockContentPos + 1,
+ newBlockContent.content.size > 0
+ ? new Slice(Fragment.from(newBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+
+ // Changes the type of the content node. The range doesn't matter as long as both from and to positions are
+ // within the content node.
+ if (keepType) {
+ state.tr.setBlockType(
+ newBlockContentPos,
+ newBlockContentPos,
+ state.schema.node(contentType).type,
+ contentNode.attrs,
+ )
+ }
+
+ // Sets the selection to the start of the new block's content node.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(newBlockContentPos)))
+
+ // Replaces the content of the original block's content node. Doesn't replace the whole content node so its
+ // type doesn't change.
+ state.tr.replace(
+ startPos + 1,
+ endPos - 1,
+ originalBlockContent.content.size > 0
+ ? new Slice(Fragment.from(originalBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+ }
+
+ return true
+ },
+ // Splits a block at a given position. Content after the position is moved to a new block below, at the same
+ // nesting level.
+ BNSplitHeadingBlock:
+ (posInBlock) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+ let { node, startPos, contentNode, depth } = blockInfo
+ if (node.childCount === 1) {
+ setTimeout(() => {
+ this.editor
+ .chain()
+ .deleteSelection()
+ .BNSplitBlock(state.selection.from, false)
+ .sinkListItem('blockContainer')
+ .UpdateGroup(-1, blockInfo.node.attrs.listType, true)
+ .run()
+ })
+ } else {
+ const originalBlockContent = state.doc.cut(startPos + 1, state.selection.from)
+ let newBlockContent = state.doc.cut(state.selection.from, startPos + contentNode.nodeSize - 1)
+ const newBlock = state.schema.nodes['blockContainer'].createAndFill()!
+ const newBlockInsertionPos = startPos + contentNode.nodeSize + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ if (dispatch) {
+ // Creates a new block. Since the schema requires it to have a content node, a paragraph node is created
+ // automatically, spanning newBlockContentPos to newBlockContentPos + 1.
+ state.tr.insert(newBlockInsertionPos, newBlock)
+
+ // // Replaces the content of the newly created block's content node. Doesn't replace the whole content node so
+ // // its type doesn't change.
+ state.tr.replace(
+ newBlockContentPos,
+ newBlockContentPos + 1,
+ newBlockContent.content.size > 0
+ ? new Slice(Fragment.from(newBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+
+ // Sets the selection to the start of the new block's content node.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(newBlockContentPos)))
+
+ state.tr.replace(
+ startPos + 1,
+ startPos + contentNode.nodeSize - 1,
+ originalBlockContent.content.size > 0
+ ? new Slice(Fragment.from(originalBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+ }
+ }
+ return true
+ },
+ // Updates group's child groups.
+ UpdateGroupChildren:
+ (group, groupPos, groupLevel, listType, indent) =>
+ ({ state, dispatch }) => {
+ if (dispatch) {
+ group.descendants((child, pos) => {
+ // If child is a group, update it's list level attribute
+ if (
+ child.type.name === 'blockGroup'
+ // && child.attrs.listType === listType
+ ) {
+ const $pos = group.resolve(pos)
+ let newLevel: string
+ if (indent > 0) {
+ const numericLevel = $pos.depth / 2 + groupLevel
+ newLevel = numericLevel < 4 ? numericLevel.toString() : '3'
+ } else {
+ const numericLevel = $pos.depth / 2 + groupLevel - 1
+ newLevel = numericLevel < 4 ? numericLevel.toString() : '3'
+ }
+ const container = state.doc.resolve(groupPos.start() + pos - 1).parent
+ const posAddition = container.type.name === 'blockContainer' ? -1 : 0
+
+ if (newLevel !== child.attrs.listLevel) {
+ state.tr.setNodeAttribute(groupPos.start() + pos + posAddition, 'listLevel', newLevel)
+ }
+ }
+ })
+ return true
+ }
+ return false
+ },
+ // Updates a block group at a given position.
+ UpdateGroup:
+ (posInBlock, listType, tab, isSank = false, turnInto = false) =>
+ ({ state, dispatch }) => {
+ // Find block group, block container and depth it is at
+ const {
+ group,
+ container,
+ depth,
+ level: groupLevel,
+ $pos,
+ } = getGroupInfoFromPos(posInBlock < 0 ? state.selection.from : posInBlock, state)
+
+ if (isSank && group.attrs.listType === listType) return true
+
+ // Change group type to div
+ if (group.attrs.listType !== 'div' && listType === 'div' && container) {
+ setTimeout(() => {
+ this.editor
+ .chain()
+ .command(({ state, dispatch }) => {
+ if (dispatch) {
+ // setTimeout(() => {
+ state.tr.setNodeMarkup($pos.before(depth), null, {
+ ...group.attrs,
+ listType: 'Group',
+ listLevel: '1',
+ })
+ // })
+ return true
+ }
+ return false
+ })
+ .UpdateGroupChildren(container, $pos, 2, group.attrs.listType, -1)
+ .run()
+ })
+
+ return true
+ }
+
+ // If block is first block in the document do nothing
+ if (
+ $pos.node(depth - 1).type.name === 'doc' &&
+ container &&
+ group.firstChild?.attrs.id === container.attrs.id
+ )
+ return false
+
+ // If block is not the first in its' group, sink list item and then update group
+ if (group.firstChild && container && group.firstChild.attrs.id !== container.attrs.id && !tab) {
+ setTimeout(() => {
+ this.editor.chain().sinkListItem('blockContainer').UpdateGroup(-1, listType, tab, true).run()
+
+ return true
+ })
+
+ return false
+ }
+
+ // If inserting other list type in another list, sink list item and then update group
+ if (
+ group.attrs.listType !== 'Group' &&
+ group.attrs.listType !== listType &&
+ container &&
+ !tab &&
+ !turnInto &&
+ !isSank
+ ) {
+ setTimeout(() => {
+ this.editor.chain().sinkListItem('blockContainer').UpdateGroup(-1, listType, tab, true).run()
+
+ return true
+ })
+ return false
+ }
+
+ if (dispatch && group.type.name === 'blockGroup') {
+ let level = '1'
+ if (depth > 7) level = '3'
+ else {
+ switch (depth) {
+ case 7:
+ level = '3'
+ break
+ case 5:
+ level = '2'
+ break
+ default:
+ break
+ }
+ }
+
+ // start
+ // ? state.tr.setNodeMarkup($pos.before(depth), null, {
+ // ...group.attrs,
+ // listType: listType,
+ // listLevel: level,
+ // start: parseInt(start),
+ // })
+ // :
+ state.tr.setNodeMarkup($pos.before(depth), null, {
+ ...group.attrs,
+ listType: listType,
+ listLevel: level,
+ })
+
+ if (container) {
+ setTimeout(() => {
+ this.editor.commands.UpdateGroupChildren(container!, $pos, groupLevel, listType, 1)
+ })
+ }
+ }
+
+ return true
+ },
+ }
+ },
+
+ addProseMirrorPlugins() {
+ return [
+ // PreviousBlockTypePlugin(),
+ SelectionPlugin,
+ ClickSelectionPlugin,
+ PastePlugin,
+ headingLinePlugin,
+ ]
+ },
+
+ addKeyboardShortcuts() {
+ // handleBackspace is partially adapted from https://github.com/ueberdosis/tiptap/blob/ed56337470efb4fd277128ab7ef792b37cfae992/packages/core/src/extensions/keymap.ts
+ const handleBackspace = () =>
+ this.editor.commands.first(({ commands }) => [
+ // Deletes the selection if it's not empty.
+ () => commands.deleteSelection(),
+ // Undoes an input rule if one was triggered in the last editor state change.
+ () => commands.undoInputRule(),
+ () =>
+ commands.command(({ state, dispatch, chain }) => {
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const blockInfo = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const isParagraph = blockInfo.contentType.name === 'paragraph'
+ let parentInfo = getParentBlockFromPos(state, state.selection.from)
+
+ if (selectionAtBlockStart && isParagraph && parentInfo) {
+ let { parentBlock, parentGroup, parentPos } = parentInfo
+ let isFirstChild = blockInfo.node.attrs.id === parentGroup.firstChild?.attrs.id
+ let isParentBlockHeading = parentBlock?.type.name === 'heading'
+
+ if (
+ // is the first child of the parent group
+ isFirstChild &&
+ // the parent of the current block is type "heading"
+ isParentBlockHeading &&
+ // parentBlock is defined
+ parentBlock
+ ) {
+ const { startPos, node, depth, endPos, contentNode } = blockInfo
+
+ // the position in which we are inserting the current block content
+ const parentInsertPos = parentPos + parentBlock?.nodeSize - 1
+
+ // lift any children of current block (if any)
+ if (node.childCount === 2) {
+ // the current block has children, we need to re-parent
+ const childBlocksStart = state.doc.resolve(startPos + contentNode.nodeSize + 1)
+ const childBlocksEnd = state.doc.resolve(endPos - 1)
+ const childBlocksRange = childBlocksStart.blockRange(childBlocksEnd)
+
+ // Moves the block group node inside the block into the block group node that the current block is in.
+ if (dispatch) {
+ state.tr.lift(childBlocksRange!, depth - 1)
+ }
+ }
+
+ if (dispatch) {
+ dispatch(
+ state.tr
+ // delete the current block content
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ // insert the current block content into the parent heading
+ .insert(parentInsertPos, contentNode.content),
+ )
+
+ // set the selection to the join between the previous heading content and the new content inserted
+ // this needs to happen after the transaction above because the document now is "different", hence we need to set
+ // the selection to a new pos.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(parentInsertPos)))
+ }
+
+ return true
+ }
+ }
+ return false
+ }),
+ // Convert a list into a normal group if the selection is at the start of the list
+ () =>
+ commands.command(({ state, view }) => {
+ const { group, container, depth, $pos } = getGroupInfoFromPos(state.selection.from, state)
+
+ if (group.attrs.listType !== 'div' && $pos.pos === $pos.start()) {
+ // If block is first in the group change group type
+ if (container && group.firstChild?.attrs.id === container.attrs.id) {
+ setTimeout(() => {
+ view.dispatch(
+ state.tr.setNodeMarkup($pos.before(depth), null, {
+ ...group.attrs,
+ listType: 'div',
+ listLevel: '1',
+ }),
+ )
+
+ this.editor.commands.UpdateGroupChildren(container, $pos, 2, group.attrs.listType, -1)
+ })
+
+ return true
+ }
+ }
+ return false
+ }),
+ // If previous block is media, node select it
+ () =>
+ commands.command(({ state, dispatch, view }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, state.selection.from)!
+ const prevBlockInfo = getBlockInfoFromPos(
+ state.doc,
+ state.selection.$anchor.pos - state.selection.$anchor.depth,
+ )
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ const isParagraph = blockInfo.contentType.name === 'paragraph'
+
+ if (selectionAtBlockStart) {
+ if (isParagraph) {
+ if (blockInfo.contentType.name === 'image') {
+ let tr = state.tr
+ const selection = NodeSelection.create(state.doc, blockInfo.startPos)
+ tr = tr.setSelection(selection)
+ view.dispatch(tr)
+ return true
+ }
+ if (!prevBlockInfo) return false
+ if (
+ ['file', 'embed', 'video', 'web-embed', 'math'].includes(prevBlockInfo.contentType.name) ||
+ (prevBlockInfo.contentType.name === 'image' && prevBlockInfo.contentNode.attrs.url.length === 0)
+ ) {
+ if (dispatch) {
+ const { startPos, contentNode } = blockInfo
+ state.tr
+ .setSelection(NodeSelection.create(state.doc, prevBlockInfo.startPos))
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ return true
+ }
+ }
+ } else {
+ return commands.BNUpdateBlock(state.selection.from, {
+ type: 'paragraph',
+ props: {},
+ })
+ }
+ }
+
+ return false
+ }),
+ // move blockID with content if selection is at the start of block, the block has content AND the block above is empty
+ () =>
+ commands.command(({ state, chain }) => {
+ const blockData = getBlockInfoFromPos(state.doc, state.selection.from)!
+ const groupData = getGroupInfoFromPos(state.selection.from!, state)
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ let prevBlockEndPos = blockData.startPos - 2
+ let prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ if (
+ // selection is at the start of the block
+ selectionAtBlockStart &&
+ // current block is not empty
+ blockData.node.textContent.length > 0 &&
+ // the selected block is not the first block of the child
+ groupData.group.firstChild?.attrs.id !== blockData.id &&
+ // previous block is a blockContainer
+ prevBlockInfo.node.type.name === 'blockContainer' &&
+ // prev block is empty
+ prevBlockInfo.node.textContent.length === 0
+ ) {
+ chain().BNDeleteBlock(prevBlockInfo.startPos).run()
+
+ return true
+ }
+ return false
+ }),
+ // Merge blocks if a block is in the middle of a list
+ () =>
+ commands.command(({ state, chain }) => {
+ const blockData = getBlockInfoFromPos(state.doc, state.selection.from)!
+ const groupData = getGroupInfoFromPos(state.selection.from!, state)
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ let prevBlockEndPos = blockData.startPos - 2
+ let prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ if (
+ // selection is at the start of the block
+ selectionAtBlockStart &&
+ // the selected block is not the first block of the child
+ groupData.group.firstChild?.attrs.id !== blockData.id &&
+ // previous block is a blockContainer
+ prevBlockInfo.node.type.name === 'blockContainer'
+ ) {
+ return commands.BNMergeBlocks(blockData.startPos - 1)
+ }
+ return false
+ }),
+ // Reverts block content type to a paragraph if the selection is at the start of the block.
+ () =>
+ commands.command(({ state }) => {
+ const { contentType } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const isParagraph = contentType.name === 'paragraph'
+
+ if (selectionAtBlockStart && !isParagraph) {
+ return commands.BNUpdateBlock(state.selection.from, {
+ type: 'paragraph',
+ props: {},
+ })
+ }
+
+ return false
+ }),
+ // Removes a level of nesting if the block is indented if the selection is at the start of the block.
+ () =>
+ commands.command(({ state }) => {
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ if (selectionAtBlockStart) {
+ return commands.liftListItem('blockContainer')
+ }
+
+ return false
+ }),
+ // Merges block with the previous one if it isn't indented, isn't the first block in the doc, and the selection
+ // is at the start of the block.
+ () =>
+ commands.command(({ state }) => {
+ const { depth, startPos } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockAtDocStart = startPos === 2
+
+ const posBetweenBlocks = startPos - 1
+
+ if (!blockAtDocStart && selectionAtBlockStart && selectionEmpty && depth === 2) {
+ return commands.BNMergeBlocks(posBetweenBlocks)
+ }
+
+ return false
+ }),
+ ])
+
+ const handleEnter = () =>
+ this.editor.commands.first(({ commands }) => [
+ // add a block on top of the current one so the block ID will follow the content
+ () =>
+ commands.command(({ state, chain }) => {
+ const data = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockEmpty = data.node.textContent.length === 0
+ const newBlockInsertionPos = data.startPos - 1
+
+ if (selectionAtBlockStart && selectionEmpty && !blockEmpty) {
+ chain()
+ .BNCreateBlock(newBlockInsertionPos)
+ // .setTextSelection(newBlockContentPos)
+ .run()
+
+ return true
+ }
+
+ return false
+ }),
+ // when the current block is a heading, do a special splitBlock to suggest heading hierarchy
+ () =>
+ commands.command(({ state, chain }) => {
+ const { contentNode } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ // if selection is not in the beginning of the heading and is a heading,
+ // we need to check what we need to do
+ if (!selectionAtBlockStart && contentNode.type.name === 'heading') {
+ chain().deleteSelection().BNSplitHeadingBlock(state.selection.from).run()
+ return true
+ }
+
+ return false
+ }),
+ // Removes a level of nesting if the block is empty & indented, while the selection is also empty & at the start
+ // of the block.
+ () =>
+ commands.command(({ state }) => {
+ const { node, depth } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const { group } = getGroupInfoFromPos(state.selection.from, state)
+ const isLastBlock = group.lastChild?.attrs.id === node.attrs.id
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockEmpty = node.textContent.length === 0
+ const blockIndented = depth > 2
+
+ if (selectionAtBlockStart && selectionEmpty && blockEmpty && blockIndented && isLastBlock) {
+ return commands.liftListItem('blockContainer')
+ }
+
+ return false
+ }),
+ // Creates a new block and moves the selection to it if the current one is empty, while the selection is also
+ // empty & at the start of the block.
+ () =>
+ commands.command(({ state, chain }) => {
+ const { node, endPos } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockEmpty = node.textContent.length === 0
+
+ if (selectionAtBlockStart && selectionEmpty && blockEmpty) {
+ const newBlockInsertionPos = endPos + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ chain().BNCreateBlock(newBlockInsertionPos).setTextSelection(newBlockContentPos).run()
+
+ return true
+ }
+
+ return false
+ }),
+
+ // Splits the current block, moving content inside that's after the cursor to a new text block below. Also
+ // deletes the selection beforehand, if it's not empty.
+ () =>
+ commands.command(({ state, chain }) => {
+ const { node } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const blockEmpty = node.textContent.length === 0
+
+ if (!blockEmpty) {
+ chain().deleteSelection().BNSplitBlock(state.selection.from, false).run()
+
+ return true
+ }
+
+ return false
+ }),
+ ])
+
+ const handleTab = () =>
+ this.editor.commands.first(({ commands }) => [
+ () =>
+ commands.command(({ state }) => {
+ // Find block group, block container and depth it is at
+ const { group } = getGroupInfoFromPos(state.selection.from, state)
+
+ if (group.type.name === 'blockGroup' && group.attrs.listType !== 'Group') {
+ setTimeout(() => {
+ this.editor.chain().sinkListItem('blockContainer').UpdateGroup(-1, group.attrs.listType, true).run()
+ })
+ return true
+ }
+ return false
+ }),
+ () => {
+ commands.command(({ state }) => {
+ const { group, container, level, $pos } = getGroupInfoFromPos(state.selection.from, state)
+
+ if (container)
+ setTimeout(() => {
+ this.editor
+ .chain()
+ .sinkListItem('blockContainer')
+ .UpdateGroupChildren(container, $pos, level + 1, group.attrs.listType, 1)
+ .run()
+ })
+ else commands.sinkListItem('blockContainer')
+ return true
+ })
+ return true
+ },
+ ])
+
+ const handleShiftTab = () =>
+ this.editor.commands.first(({ commands }) => [
+ () =>
+ commands.command(({ state }) => {
+ // Find block group, block container and depth it is at
+ const { group, container, level, $pos } = getGroupInfoFromPos(state.selection.from, state)
+
+ // if (group.attrs.listType === 'Group') return false
+
+ if (container) {
+ setTimeout(() => {
+ this.editor.commands.UpdateGroupChildren(container, $pos, level, group.attrs.listType, -1)
+ })
+ }
+ return false
+ }),
+ () => {
+ commands.liftListItem('blockContainer')
+ return true
+ },
+ ])
+
+ return {
+ Backspace: handleBackspace,
+ Enter: handleEnter,
+ // Always returning true for tab key presses ensures they're not captured by the browser. Otherwise, they blur the
+ // editor since the browser will try to use tab for keyboard navigation.
+ Tab: handleTab,
+ 'Shift-Tab': handleShiftTab,
+ 'Mod-Alt-0': () => this.editor.commands.BNCreateBlock(this.editor.state.selection.anchor + 2),
+ 'Mod-Alt-1': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'heading',
+ props: {
+ level: '1',
+ },
+ }),
+ 'Mod-Alt-2': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'heading',
+ props: {
+ level: '2',
+ },
+ }),
+ 'Mod-Alt-3': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'heading',
+ props: {
+ level: '3',
+ },
+ }),
+ 'Mod-Shift-7': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'bulletListItem',
+ props: {},
+ }),
+ 'Mod-Shift-8': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'numberedListItem',
+ props: {},
+ }),
+ 'Shift-ArrowLeft': () => {
+ const { state, view } = this.editor
+ const { selection } = state
+ const { id: selectedId } = getBlockInfoFromPos(state.doc, selection.from - 1)
+ if (selection.from <= 3) {
+ return false
+ }
+ if (selection.from === selection.$from.start()) {
+ let currentPos = selection.from - 1
+ let currentNode = state.doc.resolve(currentPos).parent
+ let { id: currentId } = getBlockInfoFromPos(state.doc, currentPos)
+ while (selectedId === currentId || ['blockContainer', 'blockGroup'].includes(currentNode.type.name)) {
+ currentPos--
+ currentNode = state.doc.resolve(currentPos).parent
+ currentId = getBlockInfoFromPos(state.doc, currentPos).id
+ }
greptile
logic: Infinite loop possible if getBlockInfoFromPos fails or returns unexpected values; currentPos is also decremented with no lower bound, so the loop can walk past the start of the document.
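A minimal way to bound this loop, sketched with assumed guards (the lower bound of 1 and the null check on getBlockInfoFromPos's return are not in the original code):
while (selectedId === currentId || ['blockContainer', 'blockGroup'].includes(currentNode.type.name)) {
  currentPos--
  // Assumed guard: stop before resolving positions outside the document.
  if (currentPos < 1) break
  currentNode = state.doc.resolve(currentPos).parent
  const blockInfo = getBlockInfoFromPos(state.doc, currentPos)
  // Assumed guard: bail out if block info cannot be resolved.
  if (!blockInfo) break
  currentId = blockInfo.id
}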
diff block
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+# Dev-only script to ensure postgres is up
+# for services NOT managed by Docker Compose
+
+DB_HOST=localhost
+DB_PORT=5432
+DB_NAME=${1:-posthog}
+DB_USER=posthog
+export PGPASSWORD=posthog
+
+# Loop until PostgreSQL is up and accepting connections
+until psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -c '\q' > /dev/null 2>&1; do
+ echo "Awaiting PostgresSQL warmup..."
+ sleep 1
+done
greptile
style: Consider adding a timeout or max retry count to prevent infinite loops if PostgreSQL never comes up.
suggested fix
+MAX_RETRIES=60
+RETRY_COUNT=0
until psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -c '\q' > /dev/null 2>&1; do
echo "Awaiting PostgresSQL warmup..."
sleep 1
+ RETRY_COUNT=$((RETRY_COUNT + 1))
+ if [ $RETRY_COUNT -ge $MAX_RETRIES ]; then
+ echo "Timeout waiting for PostgreSQL to start"
+ exit 1
+ fi
done
diff block
});
const searchParams = useSearchParams();
+ // TODO: This is a hack to reset the pagination when the search params change. WHY?
useEffect(() => {
setExecutionPagination({
...executionPagination,
offset: 0,
});
}, [searchParams]);
greptile
style: This effect has a potential issue with the dependency array. It depends on `executionPagination` but also modifies it, which could cause unnecessary rerenders or infinite loops in some cases. Consider using a functional update instead.
suggested fix
+ useEffect(() => {
+ setExecutionPagination(prev => ({
+ ...prev,
+ offset: 0,
+ }));
+ }, [searchParams]);
diff block
+import { execa } from 'execa';
+import { sleep } from '../tools/sleep.ts';
+
+const checkPodsReadiness = async (): Promise<boolean> => {
+ const { stdout } = await execa('kubectl', ['get', 'pods']);
+ console.clear();
+ console.log('Current pod status:');
+ console.log(stdout);
+
+ const lines = stdout.split('\n').slice(1); // Skip the header line
+ return (
+ lines.length > 0 &&
+ lines.every(line => {
+ const [, ready] = line.split(/\s+/);
+ const [readyCount, totalCount] = ready.split('/');
+ return readyCount === totalCount;
+ })
+ );
+};
+
+const main = async () => {
+ console.log('Starting pod readiness check...');
+ for (;;) {
+ const allReady = await checkPodsReadiness();
+ if (allReady) {
+ console.log('All pods are ready!');
+ process.exit(0);
+ }
+ await sleep(2 * 1_000);
+ }
greptile
logic: Infinite loop with no timeout could run forever if pods never become ready. Consider adding a maximum retry count or timeout duration.
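A rough sketch of a bounded version, reusing the helpers already in the file; the five-minute budget and the exit code are assumptions, not part of the original script:
const main = async () => {
  console.log('Starting pod readiness check...');
  const deadline = Date.now() + 5 * 60 * 1_000; // assumed five-minute budget
  while (Date.now() < deadline) {
    if (await checkPodsReadiness()) {
      console.log('All pods are ready!');
      process.exit(0);
    }
    await sleep(2 * 1_000);
  }
  console.error('Timed out waiting for pods to become ready.');
  process.exit(1);
};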
diff block
+import { getDiscorBasedRequestHeaders } from "discord-token-decrypt";
+import { getDiscordToken } from "discord-token-decrypt";
+import fetch, { type RequestInit } from "node-fetch";
+import { LocalStorage } from "@raycast/api";
+
+class Discord {
+ #baseUrl = "https://discord.com/api/v9";
+ #token: string | null = null;
+ #tokenPromise: Promise<string> | null = null;
+
+ #tokenCacheDuration = 7 * 24 * 60 * 60 * 1000; // 1 week
+
+ async #getToken(): Promise<string> {
+ if (this.#tokenPromise) {
+ return this.#tokenPromise;
+ }
+
+ this.#tokenPromise = this.#fetchTokenWithCache();
+
+ try {
+ const token = await this.#tokenPromise;
+ this.#token = token;
+ return token;
+ } finally {
+ this.#tokenPromise = null;
+ }
+ }
+
+ async #fetchTokenWithCache(): Promise<string> {
+ if (this.#token) {
+ return this.#token;
+ }
+
+ const cachedToken = await LocalStorage.getItem<string>("discord-token-cache");
+ if (cachedToken) {
+ const parsedCache = JSON.parse(cachedToken) as { token: string; expiresAt: number };
+ if (parsedCache.expiresAt > Date.now()) {
+ return parsedCache.token;
+ }
+ }
+
+ const token = await getDiscordToken();
+ if (!token) {
+ throw new Error("Failed to get Discord token");
+ }
+
+ const tokenCache = {
+ token,
+ expiresAt: Date.now() + this.#tokenCacheDuration,
+ };
+
+ await LocalStorage.setItem("discord-token-cache", JSON.stringify(tokenCache));
+ return token;
+ }
+
+ async #buildHeaders() {
+ const token = await this.#getToken();
+ const baseHeaders = getDiscorBasedRequestHeaders();
+ return {
+ ...baseHeaders,
+ Authorization: token,
+ };
+ }
+
+ async invalidateToken(): Promise<void> {
+ this.#token = null;
+ await LocalStorage.removeItem("discord-token-cache");
+ }
+
+ async #apiRequest<T>(path: string, options: RequestInit = {}, isRetry = false): Promise<T> {
+ const headers = await this.#buildHeaders();
+ const requestOptions = {
+ ...options,
+ headers: {
+ ...headers,
+ ...(options.headers || {}),
+ },
+ };
+
+ const response = await fetch(this.#baseUrl + path, requestOptions);
+
+ if (response.status === 401 && !isRetry) {
+ console.log("Unauthorized error detected, refreshing token and retrying...");
+
+ await this.invalidateToken();
+ return this.#apiRequest<T>(path, options, true);
+ }
greptile
logic: The retry logic should have a maximum retry count to prevent infinite loops on persistent auth failures
suggested fix
if (response.status === 401 && !isRetry) {
console.log("Unauthorized error detected, refreshing token and retrying...");
await this.invalidateToken();
+ // Only retry once to prevent infinite loops
return this.#apiRequest<T>(path, options, true);
}
diff block
+from onyx.connectors.connector_runner import CheckpointOutputWrapper
+from onyx.connectors.interfaces import CheckpointConnector
+from onyx.connectors.interfaces import SecondsSinceUnixEpoch
+from onyx.connectors.models import ConnectorCheckpoint
+from onyx.connectors.models import ConnectorFailure
+from onyx.connectors.models import Document
+
+_ITERATION_LIMIT = 100_000
+
+
+def load_all_docs_from_checkpoint_connector(
+ connector: CheckpointConnector,
+ start: SecondsSinceUnixEpoch,
+ end: SecondsSinceUnixEpoch,
+) -> list[Document]:
+ num_iterations = 0
+
+ checkpoint = ConnectorCheckpoint.build_dummy_checkpoint()
+ documents: list[Document] = []
+ while checkpoint.has_more:
+ doc_batch_generator = CheckpointOutputWrapper()(
+ connector.load_from_checkpoint(start, end, checkpoint)
+ )
+ for document, failure, next_checkpoint in doc_batch_generator:
+ if failure is not None:
+ raise RuntimeError(f"Failed to load documents: {failure}")
+ if document is not None:
+ documents.append(document)
+ if next_checkpoint is not None:
+ checkpoint = next_checkpoint
greptile
logic: No validation that next_checkpoint differs from the current checkpoint; if the connector keeps returning the same checkpoint, the loop makes no forward progress and can spin indefinitely despite the declared iteration limit.
suggested fix
if next_checkpoint is not None:
+ if next_checkpoint == checkpoint:
+ raise RuntimeError("Checkpoint did not advance - potential infinite loop")
checkpoint = next_checkpoint
diff block
+import * as React from "react"
+
+import type {
+ ToastActionElement,
+ ToastProps,
+} from "@/components/ui/toast"
+
+const TOAST_LIMIT = 1
+const TOAST_REMOVE_DELAY = 1000000
+
+type ToasterToast = ToastProps & {
+ id: string
+ title?: React.ReactNode
+ description?: React.ReactNode
+ action?: ToastActionElement
+}
+
+const actionTypes = {
+ ADD_TOAST: "ADD_TOAST",
+ UPDATE_TOAST: "UPDATE_TOAST",
+ DISMISS_TOAST: "DISMISS_TOAST",
+ REMOVE_TOAST: "REMOVE_TOAST",
+} as const
+
+let count = 0
+
+function genId() {
+ count = (count + 1) % Number.MAX_SAFE_INTEGER
+ return count.toString()
+}
+
+type ActionType = typeof actionTypes
+
+type Action =
+ | {
+ type: ActionType["ADD_TOAST"]
+ toast: ToasterToast
+ }
+ | {
+ type: ActionType["UPDATE_TOAST"]
+ toast: Partial<ToasterToast>
+ }
+ | {
+ type: ActionType["DISMISS_TOAST"]
+ toastId?: ToasterToast["id"]
+ }
+ | {
+ type: ActionType["REMOVE_TOAST"]
+ toastId?: ToasterToast["id"]
+ }
+
+interface State {
+ toasts: ToasterToast[]
+}
+
+const toastTimeouts = new Map<string, ReturnType<typeof setTimeout>>()
+
+const addToRemoveQueue = (toastId: string) => {
+ if (toastTimeouts.has(toastId)) {
+ return
+ }
+
+ const timeout = setTimeout(() => {
+ toastTimeouts.delete(toastId)
+ dispatch({
+ type: "REMOVE_TOAST",
+ toastId: toastId,
+ })
+ }, TOAST_REMOVE_DELAY)
+
+ toastTimeouts.set(toastId, timeout)
+}
+
+export const reducer = (state: State, action: Action): State => {
+ switch (action.type) {
+ case "ADD_TOAST":
+ return {
+ ...state,
+ toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT),
+ }
+
+ case "UPDATE_TOAST":
+ return {
+ ...state,
+ toasts: state.toasts.map((t) =>
+ t.id === action.toast.id ? { ...t, ...action.toast } : t
+ ),
+ }
+
+ case "DISMISS_TOAST": {
+ const { toastId } = action
+
+ // ! Side effects ! - This could be extracted into a dismissToast() action,
+ // but I'll keep it here for simplicity
+ if (toastId) {
+ addToRemoveQueue(toastId)
+ } else {
+ state.toasts.forEach((toast) => {
+ addToRemoveQueue(toast.id)
+ })
+ }
+
+ return {
+ ...state,
+ toasts: state.toasts.map((t) =>
+ t.id === toastId || toastId === undefined
+ ? {
+ ...t,
+ open: false,
+ }
+ : t
+ ),
+ }
+ }
+ case "REMOVE_TOAST":
+ if (action.toastId === undefined) {
+ return {
+ ...state,
+ toasts: [],
+ }
+ }
+ return {
+ ...state,
+ toasts: state.toasts.filter((t) => t.id !== action.toastId),
+ }
+ }
+}
+
+const listeners: Array<(state: State) => void> = []
+
+let memoryState: State = { toasts: [] }
+
+function dispatch(action: Action) {
+ memoryState = reducer(memoryState, action)
+ listeners.forEach((listener) => {
+ listener(memoryState)
+ })
+}
+
+type Toast = Omit<ToasterToast, "id">
+
+function toast({ ...props }: Toast) {
+ const id = genId()
+
+ const update = (props: ToasterToast) =>
+ dispatch({
+ type: "UPDATE_TOAST",
+ toast: { ...props, id },
+ })
+ const dismiss = () => dispatch({ type: "DISMISS_TOAST", toastId: id })
+
+ dispatch({
+ type: "ADD_TOAST",
+ toast: {
+ ...props,
+ id,
+ open: true,
+ onOpenChange: (open) => {
+ if (!open) dismiss()
+ },
+ },
+ })
+
+ return {
+ id: id,
+ dismiss,
+ update,
+ }
+}
+
+function useToast() {
+ const [state, setState] = React.useState<State>(memoryState)
+
+ React.useEffect(() => {
+ listeners.push(setState)
+ return () => {
+ const index = listeners.indexOf(setState)
+ if (index > -1) {
+ listeners.splice(index, 1)
+ }
+ }
+ }, [state])
greptile
logic: Adding state as a dependency to useEffect makes the effect re-run on every state change, tearing down and re-registering the listener each time and risking an update loop; the listener should be registered once with an empty dependency array.
suggested fix
React.useEffect(() => {
listeners.push(setState)
return () => {
const index = listeners.indexOf(setState)
if (index > -1) {
listeners.splice(index, 1)
}
}
+ }, [])
diff block
+import { mergeAttributes, Node } from '@tiptap/core'
+import { Fragment, Node as PMNode, Slice } from 'prosemirror-model'
+import { EditorState, NodeSelection, Plugin, PluginKey, TextSelection } from 'prosemirror-state'
+import { Decoration, DecorationSet } from 'prosemirror-view'
+import { ResolvedPos } from '@tiptap/pm/model'
+import { EditorView } from '@tiptap/pm/view'
+import { blockToNode, inlineContentToNodes } from '../../../api/nodeConversions/nodeConversions'
+
+import { BlockChildrenType, BlockNoteDOMAttributes, BlockSchema, PartialBlock } from '../api/blockTypes'
+import { mergeCSSClasses } from '../../../shared/utils'
+import { getBlockInfoFromPos } from '../helpers/getBlockInfoFromPos'
+import { getGroupInfoFromPos } from '../helpers/getGroupInfoFromPos'
+import styles from './Block.module.css'
+import BlockAttributes from './BlockAttributes'
+
+const SelectionPluginKey = new PluginKey('selectionPluginKey')
+const ClickSelectionPluginKey = new PluginKey('clickSelectionPluginKey')
+const PastePluginKey = new PluginKey('pastePluginKey')
+const headingLinePluginKey = new PluginKey('HeadingLinePlugin')
+
+const SelectionPlugin = new Plugin({
+ key: SelectionPluginKey,
+ state: {
+ init() {
+ return DecorationSet.empty
+ },
+ apply(tr, oldState) {
+ return tr.getMeta(SelectionPluginKey) || oldState
+ },
+ },
+ props: {
+ decorations(state) {
+ return this.getState(state)
+ },
+ },
+})
+
+const ClickSelectionPlugin = new Plugin({
+ key: ClickSelectionPluginKey,
+ props: {
+ handleDOMEvents: {
+ mousedown(view, event) {
+ if (event.shiftKey && event.button === 0) {
+ const { state } = view
+ const editorBoundingBox = (view.dom.firstChild! as HTMLElement).getBoundingClientRect()
+ const coords = {
+ left: editorBoundingBox.left + editorBoundingBox.width / 2, // take middle of editor
+ top: event.clientY,
+ }
+ const pos = view.posAtCoords(coords)
+ if (!pos) {
+ return undefined
+ }
+ const { selection } = state
+ const selectedPos = state.doc.resolve(selection.from)
+ const nodePos = state.doc.resolve(pos.pos)
+ if (selectedPos.start() === selection.from && pos.pos === nodePos.end()) {
+ const decoration = Decoration.widget(nodePos.pos, () => {
+ const span = document.createElement('span')
+ span.style.backgroundColor = 'blue'
+ span.style.width = '10px'
+ span.style.height = '10px'
+ return span
+ })
+ const decorationSet = DecorationSet.create(state.doc, [decoration])
+ view.dispatch(state.tr.setMeta(SelectionPluginKey, decorationSet))
+ }
+ return false
+ }
+ return false
+ },
+ },
+ },
+})
+
+const PastePlugin = new Plugin({
+ key: PastePluginKey,
+ props: {
+ handlePaste: (view, event) => {
+ if (!event.clipboardData) {
+ return false
+ }
+
+ const { state } = view
+ let { tr } = state
+ const { selection } = state
+ const { $from, $to } = selection
+
+ const targetNode = state.doc.resolve($from.pos).parent
+
+ if (targetNode.type.name === 'image') {
+ tr = tr.insertText(event.clipboardData.getData('text/plain'), $from.pos, $to.pos)
+ view.dispatch(tr)
+ return true
+ }
+
+ return false
+ },
+ },
+})
+
+function getNearestHeadingFromPos(state: EditorState, pos: number) {
+ const $pos = state.doc.resolve(pos)
+ const maxDepth = $pos.depth
+ let group = $pos.node(maxDepth)
+ let heading = group.firstChild
+ let depth = maxDepth
+
+ if (maxDepth > 3) {
+ while (depth >= 0 && group.type.name !== 'blockContainer' && heading?.type.name !== 'heading') {
+ depth -= 1
+ group = $pos.node(depth)
+ heading = group.firstChild
+ }
+ return {
+ depth,
+ groupStartPos: $pos.start(depth),
+ heading,
+ group,
+ $pos,
+ }
+ }
+
+ return null
+}
+
+class HeadingLinePlugin {
+ private line: HTMLElement
+
+ constructor(view: EditorView) {
+ this.line = document.createElement('div')
+ this.line.style.transition = 'all 0.15s ease-in-out'
+ this.line.style.pointerEvents = 'none'
+ this.line.style.display = ''
+ this.line.style.opacity = '0'
+ view.dom.parentNode?.appendChild(this.line)
+
+ this.update(view, null)
+ }
+
+ update(view: EditorView, lastState: EditorState | null) {
+ const state = view.state
+ // Don't do anything if the document/selection didn't change
+ if (lastState && lastState.doc.eq(state.doc) && lastState.selection.eq(state.selection)) return
+
+ const res = getNearestHeadingFromPos(state, state.selection.from)
+
+ if (res && res.heading?.type.name === 'heading') {
+ const { node } = view.domAtPos(res.groupStartPos)
+
+ const rect = (node as HTMLElement).getBoundingClientRect()
+ const editorRect = view.dom.getBoundingClientRect()
+ const groupPadding = 10
+ const editorPaddingTop = 40
+ this.line.style.position = 'absolute'
+ this.line.style.top = `${rect.top + editorPaddingTop + groupPadding - editorRect.top}px`
+ this.line.style.left = `${rect.left - editorRect.left + groupPadding}px`
+ this.line.style.width = `2.5px`
+ this.line.style.height = `${rect.height - groupPadding * 2}px`
+ this.line.style.backgroundColor = 'var(--brand5)'
+ this.line.style.opacity = '0.4'
+ } else {
+ this.line.style.opacity = '0'
+ }
+ }
+
+ destroy() {
+ this.line.remove()
+ }
+}
+
+const headingLinePlugin = new Plugin({
+ key: headingLinePluginKey,
+ view(editorView) {
+ return new HeadingLinePlugin(editorView)
+ },
+})
+
+export function getParentBlockFromPos(state: EditorState, pos: number) {
+ const $pos = state.doc.resolve(pos)
+ const depth = $pos.depth
+ if (depth > 3) {
+ const parent = $pos.node(depth - 3)
+ const parentGroup = $pos.node(depth - 2)
+ const parentPos = $pos.start(depth - 3)
+ return {
+ parentGroup,
+ parentBlock: parent.firstChild,
+ parentPos,
+ depth,
+ $pos,
+ }
+ }
+
+ return null
+}
+declare module '@tiptap/core' {
+ interface Commands<ReturnType> {
+ block: {
+ BNCreateBlock: (pos: number) => ReturnType
+ BNDeleteBlock: (posInBlock: number) => ReturnType
+ BNMergeBlocks: (posBetweenBlocks: number) => ReturnType
+ BNSplitBlock: (posInBlock: number, keepType: boolean) => ReturnType
+ BNSplitHeadingBlock: (posInBlock: number) => ReturnType
+ BNUpdateBlock: <BSchema extends BlockSchema>(posInBlock: number, block: PartialBlock<BSchema>) => ReturnType
+ BNCreateOrUpdateBlock: <BSchema extends BlockSchema>(
+ posInBlock: number,
+ block: PartialBlock<BSchema>,
+ ) => ReturnType
+ UpdateGroupChildren: (
+ group: PMNode,
+ groupPos: ResolvedPos,
+ groupLevel: number,
+ listType: BlockChildrenType,
+ indent: number,
+ ) => ReturnType
+ UpdateGroup: (
+ posInBlock: number,
+ listType: BlockChildrenType,
+ tab: boolean,
+ // start?: string,
+ isSank?: boolean,
+ turnInto?: boolean,
+ ) => ReturnType
+ }
+ }
+}
+
+/**
+ * The main "Block node" documents consist of
+ */
+export const BlockContainer = Node.create<{
+ domAttributes?: BlockNoteDOMAttributes
+}>({
+ name: 'blockContainer',
+ group: 'blockContainer',
+ // A block always contains content, and optionally a blockGroup which contains nested blocks
+ content: 'blockContent blockGroup?',
+ // Ensures content-specific keyboard handlers trigger first.
+ priority: 50,
+ defining: true,
+
+ parseHTML() {
+ return [
+ {
+ tag: 'div',
+ getAttrs: (element) => {
+ if (typeof element === 'string') {
+ return false
+ }
+
+ const attrs: Record<string, string> = {}
+ for (const [nodeAttr, HTMLAttr] of Object.entries(BlockAttributes)) {
+ if (element.getAttribute(HTMLAttr)) {
+ attrs[nodeAttr] = element.getAttribute(HTMLAttr)!
+ }
+ }
+
+ if (element.getAttribute('data-node-type') === 'blockContainer') {
+ return attrs
+ }
+
+ return false
+ },
+ },
+ ]
+ },
+
+ renderHTML({ HTMLAttributes }) {
+ const domAttributes = this.options.domAttributes?.blockContainer || {}
+
+ return [
+ 'div',
+ mergeAttributes(HTMLAttributes, {
+ class: styles.blockOuter,
+ 'data-node-type': 'block-outer',
+ }),
+ [
+ 'div',
+ mergeAttributes(
+ {
+ ...domAttributes,
+ class: mergeCSSClasses(styles.block, domAttributes.class),
+ 'data-node-type': this.name,
+ },
+ HTMLAttributes,
+ ),
+ 0,
+ ],
+ ]
+ },
+
+ addCommands() {
+ return {
+ // Creates a new text block at a given position.
+ BNCreateBlock:
+ (pos) =>
+ ({ state, dispatch }) => {
+ const newBlock = state.schema.nodes.blockContainer.createAndFill()!
+
+ if (dispatch) {
+ state.tr.insert(pos, newBlock)
+ }
+
+ return true
+ },
+ // Deletes a block at a given position.
+ BNDeleteBlock:
+ (posInBlock) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { startPos, endPos } = blockInfo
+
+ if (dispatch) {
+ state.tr.deleteRange(startPos, endPos)
+ }
+
+ return true
+ },
+ // Updates a block at a given position.
+ BNUpdateBlock:
+ (posInBlock, block) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { startPos, endPos, node, contentNode } = blockInfo
+
+ if (dispatch) {
+ // Adds blockGroup node with child blocks if necessary.
+ if (block.children !== undefined && block.children.length > 0) {
+ const childNodes = []
+
+ // Creates ProseMirror nodes for each child block, including their descendants.
+ for (const child of block.children) {
+ childNodes.push(blockToNode(child, state.schema))
+ }
+
+ // Checks if a blockGroup node already exists.
+ if (node.childCount === 2) {
+ // Replaces all child nodes in the existing blockGroup with the ones created earlier.
+ state.tr.replace(
+ startPos + contentNode.nodeSize + 1,
+ endPos - 1,
+ new Slice(Fragment.from(childNodes), 0, 0),
+ )
+ } else {
+ // Inserts a new blockGroup containing the child nodes created earlier.
+ state.tr.insert(startPos + contentNode.nodeSize, state.schema.nodes.blockGroup.create({}, childNodes))
+ }
+ }
+
+ // Replaces the blockContent node's content if necessary.
+ if (block.content !== undefined) {
+ let content: PMNode[] = []
+
+ // Checks if the provided content is a string or InlineContent[] type.
+ if (typeof block.content === 'string') {
+ // Adds a single text node with no marks to the content.
+ content.push(state.schema.text(block.content))
+ } else {
+ // Adds a text node with the provided styles converted into marks to the content, for each InlineContent
+ // object.
+ content = inlineContentToNodes(block.content, state.schema)
+ }
+
+ // Replaces the contents of the blockContent node with the previously created text node(s).
+ state.tr.replace(
+ startPos + 1,
+ startPos + contentNode.nodeSize - 1,
+ new Slice(Fragment.from(content), 0, 0),
+ )
+ }
+
+ // Changes the blockContent node type and adds the provided props as attributes. Also preserves all existing
+ // attributes that are compatible with the new type.
+ state.tr.setNodeMarkup(startPos, block.type === undefined ? undefined : state.schema.nodes[block.type], {
+ ...contentNode.attrs,
+ ...block.props,
+ })
+
+ // Adds all provided props as attributes to the parent blockContainer node too, and also preserves existing
+ // attributes.
+ const providedProps = {
+ ...node.attrs,
+ ...block.props,
+ }
+ state.tr.setNodeMarkup(startPos - 1, undefined, providedProps)
+ }
+
+ return true
+ },
+ // Appends the text contents of a block to the nearest previous block, given a position between them. Children of
+ // the merged block are moved out of it first, rather than also being merged.
+ //
+ // In the example below, the position passed into the function is between Block1 and Block2.
+ //
+ // Block1
+ // Block2
+ // Block3
+ // Block4
+ // Block5
+ //
+ // Becomes:
+ //
+ // Block1
+ // Block2Block3
+ // Block4
+ // Block5
+ BNMergeBlocks:
+ (posBetweenBlocks) =>
+ ({ state, dispatch }) => {
+ const nextNodeIsBlock = state.doc.resolve(posBetweenBlocks + 1).node().type.name === 'blockContainer'
+ const prevNodeIsBlock = state.doc.resolve(posBetweenBlocks - 1).node().type.name === 'blockContainer'
+
+ if (!nextNodeIsBlock || !prevNodeIsBlock) {
+ return false
+ }
+
+ const nextBlockInfo = getBlockInfoFromPos(state.doc, posBetweenBlocks + 1)
+
+ const { node, contentNode, startPos, endPos, depth } = nextBlockInfo!
+
+ // Removes a level of nesting from all children of the next block, if it contains both content and block
+ // group nodes.
+ if (node.childCount === 2) {
+ const childBlocksStart = state.doc.resolve(startPos + contentNode.nodeSize + 1)
+ const childBlocksEnd = state.doc.resolve(endPos - 1)
+ const childBlocksRange = childBlocksStart.blockRange(childBlocksEnd)
+
+ // Moves the block group node inside the block into the block group node that the current block is in.
+ if (dispatch) {
+ state.tr.lift(childBlocksRange!, depth - 1)
+ }
+ }
+
+ let prevBlockEndPos = posBetweenBlocks - 1
+ let prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ // Finds the nearest previous block, regardless of nesting level.
+ while (prevBlockInfo!.numChildBlocks > 0) {
+ prevBlockEndPos--
+ prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+ if (prevBlockInfo === undefined) {
+ return false
+ }
+ }
+
+ // Deletes next block and adds its text content to the nearest previous block.
+
+ if (dispatch) {
+ dispatch(
+ state.tr
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ .replace(prevBlockEndPos - 1, startPos, new Slice(contentNode.content, 0, 0))
+ .scrollIntoView(),
+ )
+
+ state.tr.setSelection(new TextSelection(state.doc.resolve(prevBlockEndPos - 1)))
+ }
+
+ return true
+ },
+ // Splits a block at a given position. Content after the position is moved to a new block below, at the same
+ // nesting level.
+ BNSplitBlock:
+ (posInBlock, keepType) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+
+ const { contentNode, contentType, startPos, endPos, depth } = blockInfo
+
+ const originalBlockContent = state.doc.cut(startPos + 1, posInBlock)
+ const newBlockContent = state.doc.cut(posInBlock, endPos - 1)
+
+ const newBlock = state.schema.nodes.blockContainer.createAndFill()!
+
+ const newBlockInsertionPos = endPos + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ if (dispatch) {
+ // Creates a new block. Since the schema requires it to have a content node, a paragraph node is created
+ // automatically, spanning newBlockContentPos to newBlockContentPos + 1.
+ state.tr.insert(newBlockInsertionPos, newBlock)
+
+ // Replaces the content of the newly created block's content node. Doesn't replace the whole content node so
+ // its type doesn't change.
+ state.tr.replace(
+ newBlockContentPos,
+ newBlockContentPos + 1,
+ newBlockContent.content.size > 0
+ ? new Slice(Fragment.from(newBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+
+ // Changes the type of the content node. The range doesn't matter as long as both from and to positions are
+ // within the content node.
+ if (keepType) {
+ state.tr.setBlockType(
+ newBlockContentPos,
+ newBlockContentPos,
+ state.schema.node(contentType).type,
+ contentNode.attrs,
+ )
+ }
+
+ // Sets the selection to the start of the new block's content node.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(newBlockContentPos)))
+
+ // Replaces the content of the original block's content node. Doesn't replace the whole content node so its
+ // type doesn't change.
+ state.tr.replace(
+ startPos + 1,
+ endPos - 1,
+ originalBlockContent.content.size > 0
+ ? new Slice(Fragment.from(originalBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+ }
+
+ return true
+ },
+ // Splits a block at a given position. Content after the position is moved to a new block below, at the same
+ // nesting level.
+ BNSplitHeadingBlock:
+ (posInBlock) =>
+ ({ state, dispatch }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, posInBlock)
+ if (blockInfo === undefined) {
+ return false
+ }
+ const { node, startPos, contentNode, depth } = blockInfo
+ if (node.childCount === 1) {
+ setTimeout(() => {
+ this.editor
+ .chain()
+ .deleteSelection()
+ .BNSplitBlock(state.selection.from, false)
+ .UpdateGroup(-1, blockInfo.node.attrs.listType, true)
+ .run()
+ })
+ } else {
+ const originalBlockContent = state.doc.cut(startPos + 1, state.selection.from)
+ const newBlockContent = state.doc.cut(state.selection.from, startPos + contentNode.nodeSize - 1)
+ const newBlock = state.schema.nodes.blockContainer.createAndFill()!
+ const newBlockInsertionPos = startPos + contentNode.nodeSize + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ if (dispatch) {
+ // Creates a new block. Since the schema requires it to have a content node, a paragraph node is created
+ // automatically, spanning newBlockContentPos to newBlockContentPos + 1.
+ state.tr.insert(newBlockInsertionPos, newBlock)
+
+ // Replaces the content of the newly created block's content node. Doesn't replace the whole content node so
+ // its type doesn't change.
+ state.tr.replace(
+ newBlockContentPos,
+ newBlockContentPos + 1,
+ newBlockContent.content.size > 0
+ ? new Slice(Fragment.from(newBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+
+ // Sets the selection to the start of the new block's content node.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(newBlockContentPos)))
+
+ state.tr.replace(
+ startPos + 1,
+ startPos + contentNode.nodeSize - 1,
+ originalBlockContent.content.size > 0
+ ? new Slice(Fragment.from(originalBlockContent), depth + 2, depth + 2)
+ : undefined,
+ )
+ }
+ }
+ return true
+ },
+ // Updates group's child groups.
+ UpdateGroupChildren:
+ (group, groupPos, groupLevel, listType, indent) =>
+ ({ state, dispatch }) => {
+ if (dispatch) {
+ group.descendants((child, pos) => {
+ // If child is a group, update its list level attribute
+ if (
+ child.type.name === 'blockGroup'
+ // && child.attrs.listType === listType
+ ) {
+ const $pos = group.resolve(pos)
+ let newLevel: string
+ if (indent > 0) {
+ const numericLevel = $pos.depth / 2 + groupLevel
+ newLevel = numericLevel < 4 ? numericLevel.toString() : '3'
+ } else {
+ const numericLevel = $pos.depth / 2 + groupLevel - 1
+ newLevel = numericLevel < 4 ? numericLevel.toString() : '3'
+ }
+ const container = state.doc.resolve(groupPos.start() + pos - 1).parent
+ const posAddition = container.type.name === 'blockContainer' ? -1 : 0
+
+ if (newLevel !== child.attrs.listLevel) {
+ state.tr.setNodeAttribute(groupPos.start() + pos + posAddition, 'listLevel', newLevel)
+ }
+ }
+ })
+ return true
+ }
+ return false
+ },
+ // Updates a block group at a given position.
+ UpdateGroup:
+ (posInBlock, listType, tab, isSank = false, turnInto = false) =>
+ ({ state, dispatch }) => {
+ // Find block group, block container and depth it is at
+ const {
+ group,
+ container,
+ depth,
+ level: groupLevel,
+ $pos,
+ } = getGroupInfoFromPos(posInBlock < 0 ? state.selection.from : posInBlock, state)
+ if (isSank && group.attrs.listType === listType) return true
+
+ // Change group type to div
+ if (group.attrs.listType !== 'div' && listType === 'div' && container) {
+ setTimeout(() => {
+ this.editor
+ .chain()
+ // eslint-disable-next-line @typescript-eslint/no-shadow
+ .command(({ state, dispatch }) => {
+ if (dispatch) {
+ // setTimeout(() => {
+ state.tr.setNodeMarkup($pos.before(depth), null, {
+ ...group.attrs,
+ listType: 'Group',
+ listLevel: '1',
+ })
+ // })
+ return true
+ }
+ return false
+ })
+ .UpdateGroupChildren(container, $pos, 2, group.attrs.listType, -1)
+ .run()
+ })
+
+ return true
+ }
+
+ // If block is first block in the document do nothing
+ if (
+ $pos.node(depth - 1).type.name === 'doc' &&
+ container &&
+ group.firstChild?.attrs.id === container.attrs.id
+ )
+ return false
+
+ // If block is not the first in its group, sink list item and then update group
+ if (group.firstChild && container && group.firstChild.attrs.id !== container.attrs.id && !tab) {
+ setTimeout(() => {
+ this.editor.chain().sinkListItem('blockContainer').UpdateGroup(-1, listType, tab, true).run()
+
+ return true
+ })
+
+ return false
+ }
+
+ // If inserting other list type in another list, sink list item and then update group
+ if (
+ group.attrs.listType !== 'Group' &&
+ group.attrs.listType !== listType &&
+ container &&
+ !tab &&
+ !turnInto &&
+ !isSank
+ ) {
+ setTimeout(() => {
+ this.editor.chain().sinkListItem('blockContainer').UpdateGroup(-1, listType, tab, true).run()
+
+ return true
+ })
+ return false
+ }
+
+ if (dispatch && group.type.name === 'blockGroup') {
+ let level = '1'
+ if (depth > 7) level = '3'
+ else {
+ switch (depth) {
+ case 7:
+ level = '3'
+ break
+ case 5:
+ level = '2'
+ break
+ default:
+ break
+ }
+ }
+
+ // start
+ // ? state.tr.setNodeMarkup($pos.before(depth), null, {
+ // ...group.attrs,
+ // listType: listType,
+ // listLevel: level,
+ // start: parseInt(start),
+ // })
+ // :
+ state.tr.setNodeMarkup($pos.before(depth), null, {
+ ...group.attrs,
+ listType: listType,
+ listLevel: level,
+ })
+
+ if (container) {
+ setTimeout(() => {
+ this.editor.commands.UpdateGroupChildren(container!, $pos, groupLevel, listType, 0)
+ })
+ }
+ }
+
+ return true
+ },
+ }
+ },
+
+ addProseMirrorPlugins() {
+ return [
+ // PreviousBlockTypePlugin(),
+ SelectionPlugin,
+ ClickSelectionPlugin,
+ PastePlugin,
+ headingLinePlugin,
+ ]
+ },
+
+ addKeyboardShortcuts() {
+ // handleBackspace is partially adapted from https://github.com/ueberdosis/tiptap/blob/ed56337470efb4fd277128ab7ef792b37cfae992/packages/core/src/extensions/keymap.ts
+ const handleBackspace = () =>
+ this.editor.commands.first(({ commands }) => [
+ // Deletes the selection if it's not empty.
+ () => commands.deleteSelection(),
+ // Undoes an input rule if one was triggered in the last editor state change.
+ () => commands.undoInputRule(),
+ () =>
+ commands.command(({ state, dispatch }) => {
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const blockInfo = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const isParagraph = blockInfo.contentType.name === 'paragraph'
+ const parentInfo = getParentBlockFromPos(state, state.selection.from)
+
+ if (selectionAtBlockStart && isParagraph && parentInfo) {
+ const { parentBlock, parentGroup, parentPos } = parentInfo
+ const isFirstChild = blockInfo.node.attrs.id === parentGroup.firstChild?.attrs.id
+ const isParentBlockHeading = parentBlock?.type.name === 'heading'
+
+ if (
+ // is the first child of the parent group
+ isFirstChild &&
+ // the parent of the current block is type "heading"
+ isParentBlockHeading &&
+ // parentBlock is defined
+ parentBlock
+ ) {
+ const { startPos, node, depth, endPos, contentNode } = blockInfo
+
+ // lift any children of current block (if any)
+ if (node.childCount === 2) {
+ // the current block has children, we need to re-parent
+ const childBlocksStart = state.doc.resolve(startPos + contentNode.nodeSize + 1)
+ const childBlocksEnd = state.doc.resolve(endPos - 1)
+ const childBlocksRange = childBlocksStart.blockRange(childBlocksEnd)
+
+ // Moves the block group node inside the block into the block group node that the current block is in.
+ if (dispatch) {
+ state.tr.lift(childBlocksRange!, depth - 1)
+ }
+ }
+
+ // the position in which we are inserting the current block content
+ let parentInsertPos: number = 0
+ if (parentBlock) {
+ parentInsertPos = parentPos + parentBlock.nodeSize - 1
+ }
+
+ if (dispatch) {
+ dispatch(
+ state.tr
+ // delete the current block content
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ // insert the current block content into the parent heading
+ .insert(parentInsertPos, contentNode.content),
+ )
+
+ // set the selection to the join between the previous heading content and the new content inserted
+ // this needs to happen after the transaction above because the document now is "different", hence we need to set
+ // the selection to a new pos.
+ state.tr.setSelection(new TextSelection(state.doc.resolve(parentInsertPos)))
+ }
+
+ return true
+ }
+ }
+ return false
+ }),
+ // Convert a list into a normal group if the selection is at the start of the list
+ () =>
+ commands.command(({ state, view }) => {
+ const { group, container, depth, $pos } = getGroupInfoFromPos(state.selection.from, state)
+
+ if (group.attrs.listType !== 'div' && $pos.pos === $pos.start()) {
+ // If block is first in the group change group type
+ if (container && group.firstChild?.attrs.id === container.attrs.id) {
+ setTimeout(() => {
+ view.dispatch(
+ state.tr.setNodeMarkup($pos.before(depth), null, {
+ ...group.attrs,
+ listType: 'div',
+ listLevel: '1',
+ }),
+ )
+
+ this.editor.commands.UpdateGroupChildren(container, $pos, 2, group.attrs.listType, -1)
+ })
+
+ return true
+ }
+ }
+ return false
+ }),
+ // If previous block is media, node select it
+ () =>
+ commands.command(({ state, dispatch, view }) => {
+ const blockInfo = getBlockInfoFromPos(state.doc, state.selection.from)!
+ const prevBlockInfo = getBlockInfoFromPos(
+ state.doc,
+ state.selection.$anchor.pos - state.selection.$anchor.depth,
+ )
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ const isParagraph = blockInfo.contentType.name === 'paragraph'
+
+ if (selectionAtBlockStart) {
+ if (isParagraph) {
+ if (blockInfo.contentType.name === 'image') {
+ let tr = state.tr
+ const selection = NodeSelection.create(state.doc, blockInfo.startPos)
+ tr = tr.setSelection(selection)
+ view.dispatch(tr)
+ return true
+ }
+ if (!prevBlockInfo) return false
+ if (
+ ['file', 'embed', 'video', 'web-embed', 'math'].includes(prevBlockInfo.contentType.name) ||
+ (prevBlockInfo.contentType.name === 'image' && prevBlockInfo.contentNode.attrs.url.length === 0)
+ ) {
+ if (dispatch) {
+ const { startPos, contentNode } = blockInfo
+
+ state.tr
+ .setSelection(NodeSelection.create(state.doc, prevBlockInfo.startPos))
+ .deleteRange(startPos, startPos + contentNode.nodeSize)
+ return true
+ }
+ }
+ } else {
+ if (blockInfo.contentType.name === 'image') {
+ const { url } = blockInfo.contentNode.attrs
+ const strippedURL = url.replace('local://', '')
+ if (strippedURL.length !== 0) {
+ try {
+ window.fileSystem.deleteImage(strippedURL)
+ } catch (error) {
+ // eslint-disable-next-line no-console
+ console.error(`Received error: `, error)
+ }
+ }
+ }
+ return commands.BNUpdateBlock(state.selection.from, {
+ type: 'paragraph',
+ props: {},
+ })
+ }
+ }
+
+ return false
+ }),
+ // move blockID with content if selection is at the start of block, the block has content AND the block above is empty
+ () =>
+ commands.command(({ state, chain }) => {
+ const blockData = getBlockInfoFromPos(state.doc, state.selection.from)!
+ const groupData = getGroupInfoFromPos(state.selection.from!, state)
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ const prevBlockEndPos = blockData.startPos - 2
+ const prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ if (
+ // selection is at the start of the block
+ selectionAtBlockStart &&
+ // current block is not empty
+ blockData.node.textContent.length > 0 &&
+ // the selected block is not the first block of the child
+ groupData.group.firstChild?.attrs.id !== blockData.id &&
+ // previous block is a blockContainer
+ prevBlockInfo.node.type.name === 'blockContainer' &&
+ // prev block is empty
+ prevBlockInfo.node.textContent.length === 0
+ ) {
+ chain().BNDeleteBlock(prevBlockInfo.startPos).run()
+
+ return true
+ }
+ return false
+ }),
+ // Merge blocks if a block is in the middle of a list
+ () =>
+ commands.command(({ state }) => {
+ const blockData = getBlockInfoFromPos(state.doc, state.selection.from)!
+ const groupData = getGroupInfoFromPos(state.selection.from!, state)
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ const prevBlockEndPos = blockData.startPos - 2
+ const prevBlockInfo = getBlockInfoFromPos(state.doc, prevBlockEndPos)
+
+ if (
+ // selection is at the start of the block
+ selectionAtBlockStart &&
+ // the selected block is not the first block of the child
+ groupData.group.firstChild?.attrs.id !== blockData.id &&
+ // previous block is a blockContainer
+ prevBlockInfo.node.type.name === 'blockContainer'
+ ) {
+ return commands.BNMergeBlocks(blockData.startPos - 1)
+ }
+ return false
+ }),
+ // Reverts block content type to a paragraph if the selection is at the start of the block.
+ () =>
+ commands.command(({ state }) => {
+ const { contentType } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const isParagraph = contentType.name === 'paragraph'
+
+ if (selectionAtBlockStart && !isParagraph) {
+ return commands.BNUpdateBlock(state.selection.from, {
+ type: 'paragraph',
+ props: {},
+ })
+ }
+
+ return false
+ }),
+ // Removes a level of nesting if the block is indented if the selection is at the start of the block.
+ () =>
+ commands.command(({ state }) => {
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ if (selectionAtBlockStart) {
+ return commands.liftListItem('blockContainer')
+ }
+
+ return false
+ }),
+ // Merges block with the previous one if it isn't indented, isn't the first block in the doc, and the selection
+ // is at the start of the block.
+ () =>
+ commands.command(({ state }) => {
+ const { depth, startPos } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockAtDocStart = startPos === 2
+
+ const posBetweenBlocks = startPos - 1
+
+ if (!blockAtDocStart && selectionAtBlockStart && selectionEmpty && depth === 2) {
+ return commands.BNMergeBlocks(posBetweenBlocks)
+ }
+
+ return false
+ }),
+ ])
+
+ const handleEnter = () =>
+ this.editor.commands.first(({ commands }) => [
+ // add a block on top of the current one so the block ID will follow the content
+ () =>
+ commands.command(({ state, chain }) => {
+ const data = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockEmpty = data.node.textContent.length === 0
+ const newBlockInsertionPos = data.startPos - 1
+
+ if (selectionAtBlockStart && selectionEmpty && !blockEmpty) {
+ chain()
+ .BNCreateBlock(newBlockInsertionPos)
+ // .setTextSelection(newBlockContentPos)
+ .run()
+
+ return true
+ }
+
+ return false
+ }),
+ // when the current block is a heading, do a special splitBlock to suggest heading hierarchy
+ () =>
+ commands.command(({ state, chain }) => {
+ const { contentNode } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+
+ // if selection is not in the beginning of the heading and is a heading,
+ // we need to check what we need to do
+ if (!selectionAtBlockStart && contentNode.type.name === 'heading') {
+ chain().deleteSelection().BNSplitHeadingBlock(state.selection.from).run()
+ return true
+ }
+
+ return false
+ }),
+ // Removes a level of nesting if the block is empty & indented, while the selection is also empty & at the start
+ // of the block.
+ () =>
+ commands.command(({ state }) => {
+ const { node, depth } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const { group } = getGroupInfoFromPos(state.selection.from, state)
+ const isLastBlock = group.lastChild?.attrs.id === node.attrs.id
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockEmpty = node.textContent.length === 0
+ const blockIndented = depth > 2
+
+ if (selectionAtBlockStart && selectionEmpty && blockEmpty && blockIndented && isLastBlock) {
+ return commands.liftListItem('blockContainer')
+ }
+
+ return false
+ }),
+ // Creates a new block and moves the selection to it if the current one is empty, while the selection is also
+ // empty & at the start of the block.
+ () =>
+ commands.command(({ state, chain }) => {
+ const { node, endPos } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const selectionAtBlockStart = state.selection.$anchor.parentOffset === 0
+ const selectionEmpty = state.selection.anchor === state.selection.head
+ const blockEmpty = node.textContent.length === 0
+
+ if (selectionAtBlockStart && selectionEmpty && blockEmpty) {
+ const newBlockInsertionPos = endPos + 1
+ const newBlockContentPos = newBlockInsertionPos + 2
+
+ chain().BNCreateBlock(newBlockInsertionPos).setTextSelection(newBlockContentPos).run()
+
+ return true
+ }
+
+ return false
+ }),
+
+ // Splits the current block, moving content inside that's after the cursor to a new text block below. Also
+ // deletes the selection beforehand, if it's not empty.
+ () =>
+ commands.command(({ state, chain }) => {
+ const { node } = getBlockInfoFromPos(state.doc, state.selection.from)!
+
+ const blockEmpty = node.textContent.length === 0
+
+ if (!blockEmpty) {
+ chain().deleteSelection().BNSplitBlock(state.selection.from, false).run()
+
+ return true
+ }
+
+ return false
+ }),
+ ])
+
+ const handleTab = () =>
+ this.editor.commands.first(({ commands }) => [
+ () =>
+ commands.command(({ state }) => {
+ // Find block group, block container and depth it is at
+ const { group } = getGroupInfoFromPos(state.selection.from, state)
+
+ if (group.type.name === 'blockGroup' && group.attrs.listType !== 'Group') {
+ setTimeout(() => {
+ this.editor.chain().sinkListItem('blockContainer').UpdateGroup(-1, group.attrs.listType, true).run()
+ })
+ return true
+ }
+ return false
+ }),
+ () => {
+ commands.command(({ state }) => {
+ const { group, container, level, $pos } = getGroupInfoFromPos(state.selection.from, state)
+
+ if (container)
+ setTimeout(() => {
+ this.editor
+ .chain()
+ .sinkListItem('blockContainer')
+ .UpdateGroupChildren(container, $pos, level + 1, group.attrs.listType, 1)
+ .run()
+ })
+ else commands.sinkListItem('blockContainer')
+ return true
+ })
+ return true
+ },
+ ])
+
+ const handleShiftTab = () =>
+ this.editor.commands.first(({ commands }) => [
+ () =>
+ commands.command(({ state }) => {
+ // Find block group, block container and depth it is at
+ const { group, container, level, $pos } = getGroupInfoFromPos(state.selection.from, state)
+
+ // if (group.attrs.listType === 'Group') return false
+
+ if (container) {
+ setTimeout(() => {
+ this.editor.commands.UpdateGroupChildren(container, $pos, level, group.attrs.listType, -1)
+ })
+ }
+ return false
+ }),
+ () => {
+ commands.liftListItem('blockContainer')
+ return true
+ },
+ ])
+
+ return {
+ Backspace: handleBackspace,
+ Enter: handleEnter,
+ // Always returning true for tab key presses ensures they're not captured by the browser. Otherwise, they blur the
+ // editor since the browser will try to use tab for keyboard navigation.
+ Tab: handleTab,
+ 'Shift-Tab': handleShiftTab,
+ 'Mod-Alt-0': () => this.editor.commands.BNCreateBlock(this.editor.state.selection.anchor + 2),
+ 'Mod-Alt-1': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'heading',
+ props: {
+ level: '1',
+ },
+ }),
+ 'Mod-Alt-2': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'heading',
+ props: {
+ level: '2',
+ },
+ }),
+ 'Mod-Alt-3': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'heading',
+ props: {
+ level: '3',
+ },
+ }),
+ 'Mod-Shift-7': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'bulletListItem',
+ props: {},
+ }),
+ 'Mod-Shift-8': () =>
+ this.editor.commands.BNUpdateBlock(this.editor.state.selection.anchor, {
+ type: 'numberedListItem',
+ props: {},
+ }),
+ 'Shift-ArrowLeft': () => {
+ const { state, view } = this.editor
+ const { selection } = state
+ const { id: selectedId } = getBlockInfoFromPos(state.doc, selection.from - 1)
+ if (selection.from <= 3) {
+ return false
+ }
+ if (selection.from === selection.$from.start()) {
+ let currentPos = selection.from - 1
+ let currentNode = state.doc.resolve(currentPos).parent
+ let { id: currentId } = getBlockInfoFromPos(state.doc, currentPos)
+ while (selectedId === currentId || ['blockContainer', 'blockGroup'].includes(currentNode.type.name)) {
+ currentPos--
+ currentNode = state.doc.resolve(currentPos).parent
+ currentId = getBlockInfoFromPos(state.doc, currentPos).id
+ }
greptile
logic: Infinite loop possible if currentId becomes undefined
suggested fix
while (selectedId === currentId || ['blockContainer', 'blockGroup'].includes(currentNode.type.name)) {
currentPos--
+ if (currentPos < 0) break;
currentNode = state.doc.resolve(currentPos).parent
+ const blockInfo = getBlockInfoFromPos(state.doc, currentPos)
+ if (!blockInfo) break;
+ currentId = blockInfo.id
}
diff block
+import { OAuth, showToast, Toast } from "@raycast/api";
+import { OAuth2Config, OAuth2Token, TokenResponse } from "./types";
+import { getConfig, ConfigurationError } from "../../utils/config";
+import crypto from "crypto";
+import fetch from "node-fetch";
+
+const OAUTH_SCOPE = ["https://www.googleapis.com/auth/sdm.service"];
+
+export class OAuthManager {
+ private static instance: OAuthManager;
+ private client: OAuth.PKCEClient;
+ private config: OAuth2Config;
+ private refreshTimer?: NodeJS.Timeout;
+ private isAuthorizing = false;
+ private refreshListeners: (() => void)[] = [];
+
+ private constructor() {
+ try {
+ const preferences = getConfig();
+
+ this.config = {
+ clientId: preferences.clientId,
+ clientSecret: preferences.clientSecret,
+ projectId: preferences.projectId,
+ scope: OAUTH_SCOPE,
+ };
+
+ this.client = new OAuth.PKCEClient({
+ redirectMethod: OAuth.RedirectMethod.Web,
+ providerName: "Google Nest",
+ providerIcon: "command-icon.png",
+ providerId: "google-nest",
+ description: "Connect your Google Nest account to view your camera feeds",
+ });
+ } catch (error) {
+ if (error instanceof ConfigurationError) {
+ showToast({
+ style: Toast.Style.Failure,
+ title: "Configuration Error",
+ message: error.message,
+ });
+ }
+ throw error;
+ }
+ }
+
+ public static getInstance(): OAuthManager {
+ if (!OAuthManager.instance) {
+ OAuthManager.instance = new OAuthManager();
+ }
+ return OAuthManager.instance;
+ }
+
+ private async generateCodeVerifier(): Promise<string> {
+ return crypto.randomBytes(32).toString("base64url");
+ }
+
+ private async generateCodeChallenge(verifier: string): Promise<string> {
+ const hash = crypto.createHash("sha256");
+ hash.update(verifier);
+ return hash.digest("base64url");
+ }
+
+ private createTokenWithExpiry(accessToken: string, refreshToken: string | undefined, expiresIn: number): OAuth2Token {
+ const updatedAt = new Date();
+ return {
+ accessToken,
+ refreshToken,
+ expiresIn,
+ updatedAt,
+ isExpired: () => {
+ const expirationTime = updatedAt.getTime() + expiresIn * 1000;
+ // Consider token expired 5 minutes before actual expiration
+ return Date.now() > expirationTime - 300000;
+ },
+ };
+ }
+
+ public async getValidToken(): Promise<OAuth2Token> {
+ try {
+ const token = await this.client.getTokens();
+
+ if (!token?.accessToken) {
+ if (this.isAuthorizing) {
+ return new Promise((resolve) => {
+ const checkToken = async () => {
+ const newToken = await this.client.getTokens();
+ if (newToken?.accessToken) {
+ resolve(newToken);
+ } else {
+ setTimeout(checkToken, 1000);
+ }
+ };
+ checkToken();
+ });
+ }
greptile
style: This polling mechanism could create an infinite loop if the token is never obtained. Consider adding a timeout or maximum retry count.
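One way the polling could be bounded, as a sketch; the 30-second deadline and the rejection message are assumptions rather than part of the extension's code:
if (this.isAuthorizing) {
  return new Promise((resolve, reject) => {
    const deadline = Date.now() + 30_000; // assumed 30-second budget
    const checkToken = async () => {
      const newToken = await this.client.getTokens();
      if (newToken?.accessToken) {
        resolve(newToken);
      } else if (Date.now() > deadline) {
        reject(new Error("Timed out waiting for authorization to complete"));
      } else {
        setTimeout(checkToken, 1000);
      }
    };
    checkToken();
  });
}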
diff block
+use std::{
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Once,
+ },
+ time::Duration,
+};
+
+use tokio::process::Command;
+use tracing_subscriber::prelude::*;
+
+pub async fn start_nats() {
+ Command::new("docker")
+ .arg("rm")
+ .arg("test-nats")
+ .arg("--force")
+ .status()
+ .await
+ .unwrap();
+
+ let status = Command::new("docker")
+ .arg("run")
+ .arg("--rm")
+ .arg("-p")
+ .arg("4222:4222")
+ .arg("--name")
+ .arg("test-nats")
+ .arg("nats:latest")
+ .status()
+ .await
+ .unwrap();
+
+ assert!(status.success());
+}
+
+pub async fn start_redis() {
+ Command::new("docker")
+ .arg("rm")
+ .arg("test-redis")
+ .arg("--force")
+ .status()
+ .await
+ .unwrap();
+
+ let status = Command::new("docker")
+ .arg("run")
+ .arg("--rm")
+ .arg("-p")
+ .arg("6379:6379")
+ .arg("--name")
+ .arg("test-redis")
+ .arg("redis:latest")
+ .status()
+ .await
+ .unwrap();
+
+ assert!(status.success());
+}
+
+pub async fn start_fdb() {
+ Command::new("docker")
+ .arg("rm")
+ .arg("test-fdb")
+ .arg("--force")
+ .status()
+ .await
+ .unwrap();
+
+ let status = Command::new("docker")
+ .arg("run")
+ .arg("--rm")
+ .arg("-p")
+ .arg("4500:4500")
+ .arg("--name")
+ .arg("test-fdb")
+ .arg("-e")
+ .arg("FDB_CLUSTER_FILE_CONTENTS=fdb:fdb@127.0.0.1:4500")
+ // See docs-internal/infrastructure/fdb/AVX.md
+ .arg("foundationdb/foundationdb:7.1.60")
+ .status()
+ .await
+ .unwrap();
+
+ assert!(status.success());
+}
+
+pub async fn create_fdb_db() {
greptile
style: This loop retries every second with no upper bound, so it can hang indefinitely if FDB never initializes. Add a maximum retry count.
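Because the body of create_fdb_db is truncated above, the following is only a rough sketch of a bounded retry; the 30-attempt cap and the fdbcli invocation used to create the database are assumptions for illustration, not the original logic.

// Hypothetical sketch: bound the initialization retries instead of looping
// forever with a fixed delay. MAX_ATTEMPTS and the `fdbcli` command are
// assumed for illustration.
pub async fn create_fdb_db() {
    const MAX_ATTEMPTS: u32 = 30;

    for attempt in 1..=MAX_ATTEMPTS {
        let status = Command::new("docker")
            .arg("exec")
            .arg("test-fdb")
            .arg("fdbcli")
            .arg("--exec")
            .arg("configure new single ssd")
            .status()
            .await
            .unwrap();

        if status.success() {
            return;
        }

        tracing::info!(attempt, "fdb not ready yet, retrying");
        tokio::time::sleep(Duration::from_secs(1)).await;
    }

    panic!("fdb failed to initialize after {} attempts", MAX_ATTEMPTS);
}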
diff block
+use chirp_workflow::db::Database;
+use chirp_workflow::prelude::*;
+use serde_json::json;
+use uuid::Uuid;
+
+mod common;
+use common::*;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn fdb_sqlite_nats_driver() {
+ setup_tracing();
+ setup_dependencies().await;
+
+ let ctx = chirp_workflow::prelude::TestCtx::from_env::<db::DatabaseFdbSqliteNats>(
+ "fdb_sqlite_nats_driver",
+ true,
+ )
+ .await;
+ let config = ctx.config().clone();
+ let pools = ctx.pools().clone();
+
+ // // CLEAR DB
+ // pools
+ // .fdb()
+ // .unwrap()
+ // .run(|tx, _mc| async move {
+ // tx.clear_range(&[0], &[255]);
+ // Ok(())
+ // })
+ // .await
+ // .unwrap();
+ // tokio::time::sleep(std::time::Duration::from_millis(250)).await;
+
+ let mut reg = Registry::new();
+ reg.register_workflow::<def::Workflow>().unwrap();
+
+ let db = db::DatabaseFdbSqliteNats::from_pools(pools.clone()).unwrap();
+
+ // let workflow_id = Uuid::new_v4();
+ // let input = serde_json::value::RawValue::from_string("null".to_string()).unwrap();
+
+ // db.dispatch_workflow(
+ // Uuid::new_v4(),
+ // workflow_id,
+ // "workflow_name",
+ // Some(&json!({ "bald": "eagle" })),
+ // &input,
+ // false,
+ // )
+ // .await.unwrap();
+
+ // let res = db.find_workflow("workflow_name", &json!({
+ // "bald": "eagle",
+ // "fat": "man"
+ // })).await.unwrap();
+ // tracing::info!(?res);
+
+ // db.update_workflow_tags(workflow_id, "workflow_name", &json!({
+ // "bald": "eagle",
+ // "fat": "man"
+ // }))
+ // .await
+ // .unwrap();
+
+ // let res = db.find_workflow("workflow_name", &json!({
+ // "bald": "eagle",
+ // "fat": "man"
+ // })).await.unwrap();
+ // tracing::info!(?res);
+
+ let worker = Worker::new(reg.handle(), db);
+
+ tokio::spawn(async move {
+ ctx.workflow(def::Input {})
+ .tag("foo", "bar")
+ .dispatch()
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+
+ // Start worker
+ tokio::select! {
+ res = worker.poll_start(config, pools) => res.unwrap(),
+ res = tokio::signal::ctrl_c() => res.unwrap(),
+ }
+}
+
+mod def {
+ use chirp_workflow::prelude::*;
+ use futures_util::FutureExt;
+
+ #[derive(Debug, Serialize, Deserialize)]
+ pub struct Input {}
+
+ #[workflow]
+ pub async fn test(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> {
+ tracing::info!("hello from workflow");
+
+ ctx.activity(TestActivityInput {
+ foo: "bar".to_string(),
+ })
+ .await?;
+
+ let workflow_id = ctx.workflow_id();
+ ctx.signal(MySignal {
+ test: Uuid::new_v4(),
+ })
+ .to_workflow(workflow_id)
+ .send()
+ .await?;
+
+ ctx.repeat(|ctx| {
+ async move {
+ let sig = ctx.listen::<MySignal>().await?;
+ tracing::info!(?sig);
+
+ tracing::info!("eepy");
+ ctx.sleep(12000).await?;
+ tracing::info!("eeped");
+
+ Ok(Loop::<()>::Continue)
+ }
+ .boxed()
+ })
+ .await?;
greptile
logic: This creates an infinite loop (Loop::Continue) with no exit condition. Consider adding a maximum iteration count or exit condition to prevent potential test hangs.
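One way to keep the test from hanging is to give the loop an exit path, roughly as sketched below. This assumes the Loop enum exposes a Break variant alongside Continue and simply stops after the first signal; a counter carried in the closure would bound it the same way.

// Hypothetical sketch: break out of the repeat loop after handling one signal
// so the workflow (and the test) can complete. Assumes Loop::Break exists
// next to Loop::Continue.
ctx.repeat(|ctx| {
    async move {
        let sig = ctx.listen::<MySignal>().await?;
        tracing::info!(?sig);

        tracing::info!("eepy");
        ctx.sleep(12000).await?;
        tracing::info!("eeped");

        // Break instead of Continue so the loop terminates.
        Ok(Loop::Break(()))
    }
    .boxed()
})
.await?;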
diff block
+import {
+ ActionPanel,
+ Action,
+ Detail,
+ Form,
+ getSelectedText,
+ Clipboard,
+ showToast,
+ Toast,
+ getPreferenceValues,
+ useNavigation,
+} from "@raycast/api";
+import { useState, useEffect } from "react";
+
+// Add a form component to edit text
+function EditTextForm({ text, onSave }) {
+ const [textValue, setTextValue] = useState(text);
+ const { pop } = useNavigation();
+
+ function handleSubmit() {
+ onSave(textValue);
+ pop();
+ }
+
+ return (
+ <Form
+ actions={
+ <ActionPanel>
+ <Action.SubmitForm title="Save Changes" onSubmit={handleSubmit} />
+ </ActionPanel>
+ }
+ >
+ <Form.TextArea
+ id="text"
+ title="Text"
+ placeholder="Edit your text"
+ value={textValue}
+ onChange={setTextValue}
+ autoFocus
+ />
+ </Form>
+ );
+}
+
+export default function Command() {
+ const [text, setText] = useState("");
+ const [isLoading, setIsLoading] = useState(true);
+ const [issues, setIssues] = useState([]);
+ const [correctedText, setCorrectedText] = useState("");
+ const [markdownContent, setMarkdownContent] = useState("");
+ const { push, pop } = useNavigation();
+
+ const preferences = getPreferenceValues();
+
+ useEffect(() => {
+ async function initialize() {
+ let textContent = "";
+
+ try {
+ // Try to get selected text first
+ const selectedText = await getSelectedText();
+ if (selectedText) {
+ setText(selectedText);
+ textContent = selectedText;
+ setMarkdownContent("```\n" + selectedText + "\n```");
+ } else {
+ // Fall back to clipboard content
+ try {
+ const clipboard = await Clipboard.readText();
+ if (clipboard) {
+ setText(clipboard);
+ textContent = clipboard;
+ setMarkdownContent("```\n" + clipboard + "\n```");
+ } else {
+ // Provide default content if no text is available
+ setMarkdownContent("```\nPaste or enter text to check\n```");
+ setIsLoading(false);
+ return;
+ }
+ } catch (error) {
+ console.error("Error reading clipboard:", error);
+ setMarkdownContent("```\nPaste or enter text to check\n```");
+ setIsLoading(false);
+ return;
+ }
+ }
+ } catch (error) {
+ console.error("Error getting selected text:", error);
+
+ // Try clipboard as fallback
+ try {
+ const clipboard = await Clipboard.readText();
+ if (clipboard) {
+ setText(clipboard);
+ textContent = clipboard;
+ setMarkdownContent("```\n" + clipboard + "\n```");
+ } else {
+ // Provide default content if no text is available
+ setMarkdownContent("```\nPaste or enter text to check\n```");
+ setIsLoading(false);
+ return;
+ }
+ } catch (error) {
+ console.error("Error reading clipboard:", error);
+ setMarkdownContent("```\nPaste or enter text to check\n```");
+ setIsLoading(false);
+ return;
+ }
+ }
+
+ // Automatically perform spell checking if we have text
+ if (textContent.trim()) {
+ await handleCheckSpelling(textContent);
+ } else {
+ setIsLoading(false);
+ }
+ }
+
+ initialize();
+ }, []);
+
+ async function handleCheckSpelling(initialText = null) {
+ const textToCheck = initialText || text;
+
+ if (!textToCheck.trim()) {
+ showToast({
+ style: Toast.Style.Failure,
+ title: "No text to check",
+ });
+ return;
+ }
+
+ setIsLoading(true);
+ setIssues([]);
+ setCorrectedText("");
+
+ try {
+ const apiKey = preferences.apiKey || null;
+ const language = preferences.preferredLanguage || "auto";
+ const endpoint = preferences.endpoint || "https://api.languagetool.org/v2/check";
+ const preferredVariants = [
+ preferences.languageVarietyEnglish,
+ preferences.languageVarietyGerman,
+ preferences.languageVarietyPortuguese,
+ preferences.languageVarietyCatalan,
+ ]
+ .filter(Boolean)
+ .filter((variant) => variant !== "-")
+ .join(",");
+
+ const level = preferences.level || "default";
+
+ const params = new URLSearchParams();
+ params.append("text", textToCheck);
+ params.append("language", language);
+ if (preferredVariants) {
+ params.append("preferredVariants", preferredVariants);
+ }
+ if (level !== "default") {
+ params.append("level", level);
+ }
+
+ const headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ };
+
+ if (apiKey) {
+ headers["Authorization"] = `Bearer ${apiKey}`;
+ }
+
+ const response = await fetch(endpoint, {
+ method: "POST",
+ headers,
+ body: params,
+ });
+
+ if (!response.ok) {
+ throw new Error(`API error: ${response.status}`);
+ }
+
+ const data = await response.json();
+
+ if (data.matches && data.matches.length > 0) {
+ // Sort matches by their position in reverse order
+ // (to prevent offset changes when applying corrections)
+ const sortedMatches = [...data.matches].sort((a, b) => b.offset - a.offset);
+
+ // Create a copy of the original text to apply corrections
+ let textWithCorrections = textToCheck;
+
+ // Apply corrections from end to beginning to avoid offset issues
+ sortedMatches.forEach((match) => {
+ const replacement =
+ match.replacements.length > 0
+ ? match.replacements[0].value
+ : match.context.text.substring(match.context.offset, match.context.offset + match.context.length);
+
+ textWithCorrections =
+ textWithCorrections.substring(0, match.offset) +
+ replacement +
+ textWithCorrections.substring(match.offset + match.length);
+ });
+
+ setCorrectedText(textWithCorrections);
+
+ // Sort matches by their position (from start to end for display)
+ const displayMatches = [...data.matches].sort((a, b) => a.offset - b.offset);
+ setIssues(displayMatches);
+
+ // Create interactive markdown with highlighted issues
+ createInteractiveMarkdown(textToCheck, displayMatches);
+
+ showToast({
+ style: Toast.Style.Success,
+ title: `Found ${data.matches.length} issue(s)`,
+ message: "Select an issue to see suggestions",
+ });
+ } else {
+ setMarkdownContent("```\n" + textToCheck + "\n```\n\n✅ **No issues found - your text looks good!**");
+ showToast({
+ style: Toast.Style.Success,
+ title: "No issues found",
+ message: "Your text looks good!",
+ });
+ }
+ } catch (error) {
+ showToast({
+ style: Toast.Style.Failure,
+ title: "Error checking spelling",
+ message: error instanceof Error ? error.message : "Unknown error",
+ });
+ } finally {
+ setIsLoading(false);
+ }
+ }
+
+ // Create interactive markdown with highlighted issues and links to fix them
+ function createInteractiveMarkdown(originalText, matches) {
+ let markdown = "";
+ let lastIndex = 0;
+
+ // Create issue index for quick reference
+ markdown += "## Selected Text\n\n";
+ markdown += "```\n";
+
+ // Insert text with issue markers
+ matches.forEach((match) => {
+ // Add text before this match
+ markdown += originalText.substring(lastIndex, match.offset);
+
+ // Add the issue with special highlighting
+ const issueText = originalText.substring(match.offset, match.offset + match.length);
+ markdown += `[${issueText}]`;
+
+ // Update last index
+ lastIndex = match.offset + match.length;
+ });
+
+ // Add any remaining text
+ markdown += originalText.substring(lastIndex);
+ markdown += "\n```\n\n";
+
+ if (matches.length > 0) {
+ // Add interactive section with issues and fixes
+ markdown += "## Issues Found\n\n";
+
+ matches.forEach((match, index) => {
+ const issueText = originalText.substring(match.offset, match.offset + match.length);
+
+ markdown += `### \`${index + 1}\` ${match.message}\n\n`;
+ markdown += `**Context:** "${match.context.text}"\n\n`;
+ markdown += `**Issue:** "${issueText}"\n\n`;
+
+ if (match.replacements && match.replacements.length > 0) {
+ markdown += "**Suggestions:** ";
+
+ // Only show the top 3 suggestions inline
+ const topSuggestions = match.replacements.slice(0, 3);
+ topSuggestions.forEach((replacement, repIndex) => {
+ if (repIndex > 0) markdown += ", ";
+ markdown += `${replacement.value}`;
+ });
+
+ markdown += `\n\n_Press \`${index + 1}\` or use the Action menu to apply fixes_\n\n`;
+ }
+ });
+ } else {
+ markdown += "### ✅ **No issues found - your text looks good!**";
+ markdown += "\n\n_Copy or Press `Enter` to paste the text_\n\n";
+ }
+
+ setMarkdownContent(markdown);
+ }
+
+ // Handle updating text after editing
+ function handleTextEdit(newText) {
+ setText(newText);
+ setMarkdownContent("```\n" + newText + "\n```");
+ setIssues([]);
+ setCorrectedText("");
+
+ // Automatically check spelling after editing if there's text
+ if (newText.trim()) {
+ handleCheckSpelling(newText);
+ }
+ }
+
+ // Handle fixing a specific issue with a selected replacement
+ function handleFixIssue(issueIndex, replacement) {
+ if (!issues[issueIndex]) {
+ return;
+ }
+
+ const issue = issues[issueIndex];
+
+ // Create new text with this specific fix applied
+ const newText = text.substring(0, issue.offset) + replacement + text.substring(issue.offset + issue.length);
+
+ // Update text state and recreate markdown
+ setText(newText);
+
+ // Update remaining issues by adjusting offsets
+ const lengthDiff = replacement.length - issue.length;
+
+ const updatedIssues = issues
+ .filter((_, index) => index !== issueIndex) // Remove the fixed issue
+ .map((issue) => {
+ // Only adjust offsets for issues that come after the fixed one
+ if (issue.offset > issues[issueIndex].offset) {
+ return {
+ ...issue,
+ offset: issue.offset + lengthDiff,
+ };
+ }
+ return issue;
+ });
+
+ setIssues(updatedIssues);
+
+ // Recreate the markdown with updated issues
+ createInteractiveMarkdown(newText, updatedIssues);
+
+ showToast({
+ style: Toast.Style.Success,
+ title: "Fixed issue",
+ message: `Replaced "${issues[issueIndex].context.text.substring(
+ issues[issueIndex].context.offset,
+ issues[issueIndex].context.offset + issues[issueIndex].context.length,
+ )}" with "${replacement}"`,
+ });
+
+ // Return to main screen
+ pop();
+ }
+
+ async function handlePasteResult() {
+ if (!text.trim()) {
+ showToast({
+ style: Toast.Style.Failure,
+ title: "No text to process",
+ });
+ return;
+ }
+
+ setIsLoading(true);
+
+ try {
+ if (!correctedText && issues.length > 0) {
+ // If we haven't generated corrected text but we have issues,
+ // run the check spelling again to generate corrected text
+ await handleCheckSpelling();
+ }
greptile
logic: Potential infinite loop if handleCheckSpelling keeps failing to generate correctedText.
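One option, sketched below, is to change handleCheckSpelling so it returns the corrected text (it currently returns nothing) and have handlePasteResult bail out with a toast when no result comes back; the single retry and the fallback behaviour shown here are assumptions.

// Hypothetical sketch: assumes handleCheckSpelling is changed to return the
// corrected text (or null). The caller retries once and then gives up instead
// of depending on correctedText state that may never be set.
if (!correctedText && issues.length > 0) {
  const recheckedText = await handleCheckSpelling();
  if (!recheckedText) {
    showToast({
      style: Toast.Style.Failure,
      title: "Could not generate corrected text",
    });
    setIsLoading(false);
    return;
  }
}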
diff block
if (input.website !== undefined && input.website !== entity.website) {
data.website = input.website;
}
+ if (input.parent !== undefined && input.parent !== entity.parentId) {
+ if (input.parent) {
+ const [parent] = await db
+ .select()
+ .from(entities)
+ .where(eq(entities.id, input.parent))
+ .limit(1);
+ if (!parent) {
+ throw new TRPCError({
+ message: "Parent entity not found.",
+ code: "NOT_FOUND",
+ });
+ }
+
+ // Check for direct recursion (can't set itself as parent)
+ if (parent.id === entity.id) {
+ throw new TRPCError({
+ message: "An entity cannot be its own parent.",
+ code: "BAD_REQUEST",
+ });
+ }
+
+ // Check for circular references
+ let currentParent = parent;
+ const visitedParents = new Set([entity.id]);
+ while (currentParent.parentId) {
+ if (visitedParents.has(currentParent.parentId)) {
+ throw new TRPCError({
+ message: "Circular parent reference detected.",
+ code: "BAD_REQUEST",
+ });
+ }
+ visitedParents.add(currentParent.id);
+
+ const [nextParent] = await db
+ .select()
+ .from(entities)
+ .where(eq(entities.id, currentParent.parentId))
+ .limit(1);
+
+ if (!nextParent) break;
+ currentParent = nextParent;
greptile
logic: Potential infinite loop if nextParent is not found but currentParent.parentId exists.
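If extra protection is wanted on top of the visited-set check and the break on a missing parent, the ancestor walk could also be capped at a fixed depth, as in the sketch below; MAX_PARENT_DEPTH is an arbitrary illustrative value, not part of the original change.

// Hypothetical sketch: cap the ancestor traversal at a fixed depth in addition
// to the visited-set cycle check. MAX_PARENT_DEPTH is an arbitrary value.
const MAX_PARENT_DEPTH = 100;
let depth = 0;
let currentParent = parent;
const visitedParents = new Set([entity.id]);
while (currentParent.parentId && depth < MAX_PARENT_DEPTH) {
  if (visitedParents.has(currentParent.parentId)) {
    throw new TRPCError({
      message: "Circular parent reference detected.",
      code: "BAD_REQUEST",
    });
  }
  visitedParents.add(currentParent.id);

  const [nextParent] = await db
    .select()
    .from(entities)
    .where(eq(entities.id, currentParent.parentId))
    .limit(1);

  if (!nextParent) break;
  currentParent = nextParent;
  depth += 1;
}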
Want to avoid this bug in your codebase? Try Greptile.