
Commit b9084d1

rtibbles and claude committed
fix: move ruff exclude to top-level so excluded files are skipped
[tool.ruff.lint].exclude is silently ignored by ruff: files listed there were still being linted and formatted. Move all excluded files to the top-level [tool.ruff].exclude, which correctly excludes them from both linting and formatting, and revert the formatting changes that were applied to those files.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 91c6129 commit b9084d1
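
For illustration, a minimal sketch of the pyproject.toml change the message describes; the file patterns shown are placeholders, not the actual exclude list from this commit:

# Before: exclude nested under [tool.ruff.lint]; per the commit message,
# ruff was silently ignoring it, so the files were still linted and formatted.
[tool.ruff.lint]
exclude = ["morango/sync/backends/postgres.py"]  # placeholder pattern

# After: top-level exclude, which applies to both `ruff check` and `ruff format`.
[tool.ruff]
exclude = ["morango/sync/backends/postgres.py"]  # placeholder pattern

One caveat worth knowing when verifying a change like this: ruff still checks an excluded file if it is passed explicitly on the command line, unless --force-exclude is set.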

5 files changed

Lines changed: 251 additions & 77 deletions


morango/sync/backends/postgres.py

Lines changed: 11 additions & 5 deletions
@@ -23,7 +23,9 @@
 
 class SQLWrapper(BaseSQLWrapper):
     backend = "postgresql"
-    create_temporary_table_template = "CREATE TEMP TABLE {name} ({fields}) ON COMMIT DROP"
+    create_temporary_table_template = (
+        "CREATE TEMP TABLE {name} ({fields}) ON COMMIT DROP"
+    )
 
     def _is_transaction_isolation_error(self, error):
         """
@@ -34,7 +36,7 @@ def _is_transaction_isolation_error(self, error):
         from psycopg2.extensions import TransactionRollbackError
 
         # Django can wrap errors, adding it to the `__cause__` attribute
-        for e in (error, getattr(error, "__cause__", None)):
+        for e in (error, getattr(error, '__cause__', None)):
             if isinstance(e, TransactionRollbackError):
                 return True
         return False
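
A note on the hunk above: Django wraps driver exceptions and attaches the original via __cause__ (PEP 3134), which is why the helper checks both the error and its cause. A runnable sketch of that pattern, using stand-in exception classes rather than the real psycopg2 and Django ones:

# Stand-ins for psycopg2.extensions.TransactionRollbackError and Django's
# wrapper exception; only the __cause__ chaining behaviour matters here.
class TransactionRollbackError(Exception):
    pass


class OperationalError(Exception):
    pass


def is_isolation_error(error):
    # Check the error itself and, if Django wrapped it, the original
    # driver error attached as __cause__.
    for e in (error, getattr(error, "__cause__", None)):
        if isinstance(e, TransactionRollbackError):
            return True
    return False


try:
    try:
        raise TransactionRollbackError("could not serialize access")
    except TransactionRollbackError as inner:
        # Django-style wrapping: re-raise with the driver error as the cause
        raise OperationalError(str(inner)) from inner
except OperationalError as wrapped:
    assert is_isolation_error(wrapped)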
@@ -97,7 +99,9 @@ def _prepare_with_values(self, name, fields, db_values):
     def _prepare_casted_fields(self, fields):
         return ", ".join(
             map(
-                lambda f: "{f}::{type}".format(f=f.column, type=f.rel_db_type(self.connection)),
+                lambda f: "{f}::{type}".format(
+                    f=f.column, type=f.rel_db_type(self.connection)
+                ),
                 fields,
             )
         )
@@ -226,7 +230,9 @@ def _dequeuing_merge_conflict_buffer(self, cursor, current_id, transfersession_i
 
         cursor.execute(merge_conflict_store)
 
-    def _dequeuing_update_rmcs_last_saved_by(self, cursor, current_id, transfersession_id):
+    def _dequeuing_update_rmcs_last_saved_by(
+        self, cursor, current_id, transfersession_id
+    ):
         # update or create rmc for merge conflicts with local instance id
         merge_conflict_store = """
             WITH new_values as
@@ -357,7 +363,7 @@ def _execute_lock(self, key1, key2=None, unlock=False, session=False, shared=Fal
             xact_="" if session else "xact_",
             lock="unlock" if unlock else "lock",
             _shared="_shared" if shared else "",
-            keys=", ".join(["%s"] * len(keys)),
+            keys=", ".join(["%s"] * len(keys))
         )
 
         with self.connection.cursor() as c:
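
For context on the last hunk: the kwargs there appear to assemble one of Postgres's advisory lock functions (pg_advisory_lock, pg_advisory_xact_lock, pg_advisory_unlock, and their _shared variants). The surrounding template string is not shown in the diff, so its exact shape below is an inference from the kwargs and the Postgres function names:

def advisory_lock_sql(keys, unlock=False, session=False, shared=False):
    # Inferred shape of the statement the hunk's kwargs feed into:
    # session-scoped locks use pg_advisory_*, while transaction-scoped
    # locks use pg_advisory_xact_* and release automatically on commit.
    return "SELECT pg_advisory_{xact_}{lock}{_shared}({keys})".format(
        xact_="" if session else "xact_",
        lock="unlock" if unlock else "lock",
        _shared="_shared" if shared else "",
        keys=", ".join(["%s"] * len(keys)),
    )


print(advisory_lock_sql([42]))
# SELECT pg_advisory_xact_lock(%s)
print(advisory_lock_sql([1, 2], session=True, shared=True))
# SELECT pg_advisory_lock_shared(%s, %s)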

morango/sync/backends/sqlite.py

Lines changed: 31 additions & 9 deletions
@@ -20,8 +20,14 @@ def _bulk_full_record_upsert(self, cursor, table_name, fields, db_values):
         # calculate and create equal sized chunks of data to insert incrementally
         num_of_rows_able_to_insert = calculate_max_sqlite_variables() // len(fields)
         num_of_values_able_to_insert = num_of_rows_able_to_insert * len(fields)
-        value_chunks = [db_values[x : x + num_of_values_able_to_insert] for x in range(0, len(db_values), num_of_values_able_to_insert)]
-        placeholder_chunks = [placeholder_list[x : x + num_of_rows_able_to_insert] for x in range(0, len(placeholder_list), num_of_rows_able_to_insert)]
+        value_chunks = [
+            db_values[x : x + num_of_values_able_to_insert]
+            for x in range(0, len(db_values), num_of_values_able_to_insert)
+        ]
+        placeholder_chunks = [
+            placeholder_list[x : x + num_of_rows_able_to_insert]
+            for x in range(0, len(placeholder_list), num_of_rows_able_to_insert)
+        ]
         # insert data chunks
         fields_str = str(tuple(str(f.attname) for f in fields)).replace("'", "")
         for values, params in zip(value_chunks, placeholder_chunks):
@@ -40,9 +46,14 @@ def _bulk_full_record_upsert(self, cursor, table_name, fields, db_values):
     def _bulk_insert(self, cursor, table_name, fields, db_values):
         num_of_rows_able_to_insert = calculate_max_sqlite_variables() // len(fields)
         num_of_values_able_to_insert = num_of_rows_able_to_insert * len(fields)
-        value_chunks = [db_values[x : x + num_of_values_able_to_insert] for x in range(0, len(db_values), num_of_values_able_to_insert)]
+        value_chunks = [
+            db_values[x : x + num_of_values_able_to_insert]
+            for x in range(0, len(db_values), num_of_values_able_to_insert)
+        ]
         for value_chunk in value_chunks:
-            super(SQLWrapper, self)._bulk_insert(cursor, table_name, fields, value_chunk)
+            super(SQLWrapper, self)._bulk_insert(
+                cursor, table_name, fields, value_chunk
+            )
 
     def _bulk_update(self, cursor, table_name, fields, db_values):
         """
@@ -53,9 +64,14 @@ def _bulk_update(self, cursor, table_name, fields, db_values):
         # calculate and create equal sized chunks of data to update incrementally
         # for every field we're updating, we'll require 3 parameters
         num_update_fields = len(fields) - 1
-        num_of_rows_able_to_update = calculate_max_sqlite_variables() // num_update_fields // 3
+        num_of_rows_able_to_update = (
+            calculate_max_sqlite_variables() // num_update_fields // 3
+        )
         num_of_values_able_to_update = num_of_rows_able_to_update * len(fields)
-        value_chunks = [db_values[x : x + num_of_values_able_to_update] for x in range(0, len(db_values), num_of_values_able_to_update)]
+        value_chunks = [
+            db_values[x : x + num_of_values_able_to_update]
+            for x in range(0, len(db_values), num_of_values_able_to_update)
+        ]
         pk = get_pk_field(fields)
 
         # insert data chunks
@@ -66,7 +82,9 @@ def _bulk_update(self, cursor, table_name, fields, db_values):
             for field in fields:
                 if field == pk:
                     continue
-                set_field_sql = " {field} = (CASE {pk_field}".format(field=field.column, pk_field=pk.column)
+                set_field_sql = " {field} = (CASE {pk_field}".format(
+                    field=field.column, pk_field=pk.column
+                )
                 for y in range(0, len(values), len(fields)):
                     value_set = values[y : y + len(fields)]
                     set_field_sql += " WHEN %s THEN %s"
@@ -82,7 +100,9 @@ def _bulk_update(self, cursor, table_name, fields, db_values):
                 table_name=table_name,
                 set_sql=set_sql[:-1],
                 pk_field=pk.column,
-                placeholder_str="({})".format(",".join("%s" for _ in range(len(pk_params)))),
+                placeholder_str="({})".format(
+                    ",".join("%s" for _ in range(len(pk_params)))
+                ),
             )
             # use DB-APIs parameter substitution (2nd parameter expects a sequence)
             cursor.execute(update, params)
@@ -142,7 +162,9 @@ def _dequeuing_merge_conflict_buffer(self, cursor, current_id, transfersession_i
         )
         cursor.execute(merge_conflict_store)
 
-    def _dequeuing_update_rmcs_last_saved_by(self, cursor, current_id, transfersession_id):
+    def _dequeuing_update_rmcs_last_saved_by(
+        self, cursor, current_id, transfersession_id
+    ):
         # update or create rmc for merge conflicts with local instance id
         merge_conflict_store = """REPLACE INTO {rmc} (instance_id, counter, store_model_id)
             SELECT '{current_instance_id}', {current_instance_counter}, store.id
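
For context on the chunking in the hunks above: SQLite caps the number of bound parameters per statement, so the flat value list is split into chunks that fit under calculate_max_sqlite_variables(). A standalone sketch of the same idiom; the 999 floor is an assumption matching older SQLite builds (newer builds allow 32766), whereas morango computes the real limit at runtime:

MAX_SQLITE_VARIABLES = 999  # assumed floor; morango computes this at runtime


def chunk_values(db_values, num_fields):
    # Rows per statement is the parameter cap divided by parameters per row;
    # the flat value list is then sliced into statement-sized chunks.
    rows_per_stmt = MAX_SQLITE_VARIABLES // num_fields
    values_per_stmt = rows_per_stmt * num_fields
    return [
        db_values[x : x + values_per_stmt]
        for x in range(0, len(db_values), values_per_stmt)
    ]


# 5 fields -> 199 rows (995 bound parameters) per INSERT statement
chunks = chunk_values(list(range(10000)), 5)
assert all(len(c) <= 995 for c in chunks)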
