Skip to main content

Documentation Index

Fetch the complete documentation index at: https://mintlify.com/questdb/questdb/llms.txt

Use this file to discover all available pages before exploring further.

Execute queries using QuestDB’s PostgreSQL wire protocol interface. This page covers query execution patterns, prepared statements, and best practices.

Simple queries

The simple query protocol executes SQL statements directly without parameter binding.

Python

import psycopg2

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb'
)

cur = conn.cursor()

# SELECT query
cur.execute('SELECT * FROM trades WHERE symbol = \'BTC-USD\' LIMIT 10')
rows = cur.fetchall()

for row in rows:
    print(f"Symbol: {row[0]}, Price: {row[1]}")

cur.close()
conn.close()

Java

import java.sql.*;

Connection conn = DriverManager.getConnection(
    "jdbc:postgresql://localhost:8812/qdb",
    "admin",
    "quest"
);

Statement stmt = conn.createStatement();
ResultSet rs = stmt.executeQuery(
    "SELECT * FROM trades WHERE symbol = 'BTC-USD' LIMIT 10"
);

while (rs.next()) {
    String symbol = rs.getString("symbol");
    double price = rs.getDouble("price");
    System.out.println(symbol + ": " + price);
}

rs.close();
stmt.close();
conn.close();

Node.js

const { Client } = require('pg');

const client = new Client({
  host: 'localhost',
  port: 8812,
  user: 'admin',
  password: 'quest',
  database: 'qdb'
});

await client.connect();

const result = await client.query(
  "SELECT * FROM trades WHERE symbol = 'BTC-USD' LIMIT 10"
);

console.log(result.rows);

await client.end();

Prepared statements

Prepared statements with parameter binding offer better performance and security for repeated queries.

Python (psycopg2)

import psycopg2

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb'
)

cur = conn.cursor()

# SELECT with parameters
symbol = 'BTC-USD'
cur.execute(
    'SELECT * FROM trades WHERE symbol = %s LIMIT 10',
    (symbol,)
)

rows = cur.fetchall()
for row in rows:
    print(row)

# INSERT with parameters
cur.execute(
    'INSERT INTO trades VALUES (%s, %s, %s, %s)',
    ('ETH-USD', 3500.50, 100, '2024-01-15T10:30:00.000000Z')
)

conn.commit()
cur.close()
conn.close()

Java (JDBC)

PreparedStatement ps = conn.prepareStatement(
    "SELECT * FROM trades WHERE symbol = ? AND timestamp > ?"
);

ps.setString(1, "BTC-USD");
ps.setTimestamp(2, Timestamp.valueOf("2024-01-01 00:00:00"));

ResultSet rs = ps.executeQuery();

while (rs.next()) {
    System.out.println(rs.getString("symbol"));
}

rs.close();
ps.close();

Node.js (pg)

const result = await client.query(
  'SELECT * FROM trades WHERE symbol = $1 AND price > $2',
  ['BTC-USD', 50000]
);

console.log(result.rows);

Go (pgx)

rows, err := conn.Query(ctx,
    "SELECT * FROM trades WHERE symbol = $1 AND price > $2",
    "BTC-USD", 50000.0,
)
if err != nil {
    log.Fatal(err)
}
defer rows.Close()

for rows.Next() {
    var symbol string
    var price float64
    
    err = rows.Scan(&symbol, &price)
    if err != nil {
        log.Fatal(err)
    }
    
    fmt.Printf("%s: %.2f\n", symbol, price)
}

Named prepared statements

Create reusable prepared statements across multiple executions.

Python

import psycopg2

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb'
)

cur = conn.cursor()

# Prepare a named statement
cur.execute(
    'PREPARE get_trades AS SELECT * FROM trades WHERE symbol = $1'
)

# Execute the prepared statement multiple times
cur.execute('EXECUTE get_trades(%s)', ('BTC-USD',))
rows1 = cur.fetchall()

cur.execute('EXECUTE get_trades(%s)', ('ETH-USD',))
rows2 = cur.fetchall()

# Deallocate when done
cur.execute('DEALLOCATE get_trades')

conn.close()

Java

Note (review): JDBC's `Connection` has no `prepareStatement(String sql, String name)` overload — the example below will not compile as written. With the PostgreSQL JDBC driver, a statement becomes a named server-side prepared statement automatically once it has been executed more than `prepareThreshold` times (5 by default); no explicit name is needed.

// Prepare once
PreparedStatement ps = conn.prepareStatement(
    "SELECT * FROM trades WHERE symbol = ?",
    "get_trades"  // Named statement
);

// Execute multiple times
ps.setString(1, "BTC-USD");
ResultSet rs1 = ps.executeQuery();

ps.setString(1, "ETH-USD");
ResultSet rs2 = ps.executeQuery();

ps.close();  // Automatically deallocates

Batch operations

Execute multiple statements efficiently in a batch.

Python

import psycopg2
import psycopg2.extras

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb'
)

cur = conn.cursor()

# Batch insert
data = [
    ('BTC-USD', 50000.0, 100, '2024-01-15T10:30:00.000000Z'),
    ('ETH-USD', 3500.0, 200, '2024-01-15T10:31:00.000000Z'),
    ('SOL-USD', 120.0, 500, '2024-01-15T10:32:00.000000Z'),
]

psycopg2.extras.execute_batch(
    cur,
    'INSERT INTO trades VALUES (%s, %s, %s, %s)',
    data
)

conn.commit()
cur.close()
conn.close()

Java

conn.setAutoCommit(false);

PreparedStatement ps = conn.prepareStatement(
    "INSERT INTO trades VALUES (?, ?, ?, ?)"
);

for (Trade trade : trades) {
    ps.setString(1, trade.getSymbol());
    ps.setDouble(2, trade.getPrice());
    ps.setLong(3, trade.getVolume());
    ps.setTimestamp(4, trade.getTimestamp());
    ps.addBatch();
}

ps.executeBatch();
conn.commit();
ps.close();

Node.js

const { Pool } = require('pg');

const pool = new Pool({
  host: 'localhost',
  port: 8812,
  user: 'admin',
  password: 'quest',
  database: 'qdb'
});

const client = await pool.connect();

try {
  await client.query('BEGIN');
  
  const insertQuery = 'INSERT INTO trades VALUES ($1, $2, $3, $4)';
  
  for (const trade of trades) {
    await client.query(insertQuery, [
      trade.symbol,
      trade.price,
      trade.volume,
      trade.timestamp
    ]);
  }
  
  await client.query('COMMIT');
} catch (e) {
  await client.query('ROLLBACK');
  throw e;
} finally {
  client.release();
}

Transactions

QuestDB supports explicit transaction control via PostgreSQL protocol.

Python

import psycopg2

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb'
)

cur = conn.cursor()

try:
    # Begin transaction (implicit with psycopg2)
    cur.execute('INSERT INTO trades VALUES (%s, %s, %s, %s)',
                ('BTC-USD', 50000.0, 100, '2024-01-15T10:30:00.000000Z'))
    
    cur.execute('INSERT INTO trades VALUES (%s, %s, %s, %s)',
                ('ETH-USD', 3500.0, 200, '2024-01-15T10:31:00.000000Z'))
    
    # Commit transaction
    conn.commit()
except Exception as e:
    # Rollback on error
    conn.rollback()
    print(f"Error: {e}")
finally:
    cur.close()
    conn.close()

Java

conn.setAutoCommit(false);

try {
    Statement stmt = conn.createStatement();
    
    stmt.execute(
        "INSERT INTO trades VALUES ('BTC-USD', 50000.0, 100, '2024-01-15T10:30:00.000000Z')"
    );
    
    stmt.execute(
        "INSERT INTO trades VALUES ('ETH-USD', 3500.0, 200, '2024-01-15T10:31:00.000000Z')"
    );
    
    conn.commit();
    stmt.close();
} catch (SQLException e) {
    conn.rollback();
    throw e;
}

Time-series queries

QuestDB’s SQL extensions for time-series data work via PostgreSQL protocol.

Latest by

SELECT * FROM trades
LATEST ON timestamp PARTITION BY symbol;

Sample by

SELECT timestamp, symbol, avg(price) AS avg_price
FROM trades
WHERE timestamp > '2024-01-01T00:00:00.000000Z'
SAMPLE BY 1h
ALIGN TO CALENDAR;

ASOF JOIN

SELECT *
FROM trades
ASOF JOIN quotes
WHERE trades.symbol = quotes.symbol;

Python example

cur.execute('''
    SELECT timestamp, symbol, avg(price) AS avg_price
    FROM trades
    WHERE timestamp IN '2024-01-15'
    SAMPLE BY 1h
''')

rows = cur.fetchall()
for row in rows:
    print(f"{row[0]}: {row[1]} - {row[2]}")

Working with arrays

QuestDB supports multidimensional arrays via PostgreSQL protocol.
import psycopg2

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb'
)

cur = conn.cursor()

# Create table with array column
cur.execute('''
    CREATE TABLE sensor_data (
        timestamp TIMESTAMP,
        sensor_id SYMBOL,
        readings DOUBLE[]
    ) TIMESTAMP(timestamp)
''')

# Insert array data
cur.execute(
    'INSERT INTO sensor_data VALUES (%s, %s, %s)',
    ('2024-01-15T10:30:00.000000Z', 'sensor1', [1.2, 3.4, 5.6])
)

conn.commit()

# Query array data
cur.execute('SELECT * FROM sensor_data')
rows = cur.fetchall()

for row in rows:
    print(f"Readings: {row[2]}")

conn.close()

Error handling

Python

Note (review): initialize `conn = None` before the `try` block. As written, if `psycopg2.connect()` itself raises, `conn` is never assigned and the `finally` clause's `if conn:` check raises a `NameError` instead of cleaning up.

import psycopg2
from psycopg2 import Error

try:
    conn = psycopg2.connect(
        host='localhost',
        port=8812,
        user='admin',
        password='quest',
        database='qdb'
    )
    
    cur = conn.cursor()
    cur.execute('SELECT * FROM non_existent_table')
    
except psycopg2.OperationalError as e:
    print(f"Connection error: {e}")
except psycopg2.ProgrammingError as e:
    print(f"Query error: {e}")
except Error as e:
    print(f"Database error: {e}")
finally:
    if conn:
        conn.close()

Java

try {
    Statement stmt = conn.createStatement();
    ResultSet rs = stmt.executeQuery("SELECT * FROM trades");
    // Process results
} catch (SQLException e) {
    System.err.println("SQL Error: " + e.getMessage());
    System.err.println("SQL State: " + e.getSQLState());
    System.err.println("Error Code: " + e.getErrorCode());
}

Query timeout

Set query execution timeout to prevent long-running queries.

Python

import psycopg2

conn = psycopg2.connect(
    host='localhost',
    port=8812,
    user='admin',
    password='quest',
    database='qdb',
    options='-c statement_timeout=30000'  # 30 seconds
)

Java

Statement stmt = conn.createStatement();
stmt.setQueryTimeout(30);  // 30 seconds

ResultSet rs = stmt.executeQuery("SELECT * FROM trades");

Configuration

Set the default timeout in server.conf:
cairo.sql.query.timeout=60000  # 60 seconds

Performance tips

  1. Use prepared statements: Avoid SQL injection and improve performance
    # Good
    cur.execute('SELECT * FROM trades WHERE symbol = %s', (symbol,))
    
    # Avoid
    cur.execute(f'SELECT * FROM trades WHERE symbol = \'{symbol}\'')
    
  2. Rely on query caching: caching is enabled by default, so repeated queries benefit automatically with no extra configuration
  3. Use connection pooling: Reuse connections instead of creating new ones
  4. Batch inserts: Use batch operations for multiple inserts
    psycopg2.extras.execute_batch(cur, query, data)
    
  5. Optimize pipeline capacity: Increase for large batches
    pg.pipeline.capacity=128  # Default is 64
    
  6. Use binary format: Binary format reduces serialization overhead (driver-dependent)
  7. Partition pruning: Use timestamp filters to leverage partitioning
    SELECT * FROM trades
    WHERE timestamp IN '2024-01-15';  -- Uses partition pruning
    

Limitations

  • BLOB size: Maximum 512KB by default (configurable via pg.max.blob.size.on.query)
  • DELETE statement: Not supported; use ALTER TABLE DROP PARTITION instead
  • UPDATE statement: only partially supported; some UPDATE operations are unavailable
  • System catalogs: Partial PostgreSQL system catalog implementation
  • Named statement limit: Default 10,000 per connection

Next steps

SQL Reference

Learn QuestDB’s SQL dialect and extensions

Configuration

Configure PostgreSQL wire protocol settings