-
Notifications
You must be signed in to change notification settings - Fork 25
Expand file tree
/
Copy pathefficient_did_results.py
More file actions
368 lines (334 loc) · 13.6 KB
/
efficient_did_results.py
File metadata and controls
368 lines (334 loc) · 13.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
"""
Result container for the Efficient DiD estimator.
Follows the CallawaySantAnnaResults pattern: dataclass with summary(),
to_dataframe(), and significance properties.
"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from diff_diff.results import _format_survey_block, _get_significance_stars
if TYPE_CHECKING:
from diff_diff.efficient_did_bootstrap import EDiDBootstrapResults
@dataclass
class HausmanPretestResult:
    """Outcome of the Hausman pretest comparing PT-All against PT-Post (Theorem A.1).

    When the null (PT-All) holds, both estimators are consistent and PT-All
    is the efficient choice; a rejection indicates PT-All is too restrictive
    and PT-Post should be preferred.
    """

    statistic: float
    """Value of the Hausman H statistic."""
    p_value: float
    """p-value from the chi-squared reference distribution."""
    df: int
    """Degrees of freedom (effective rank of V)."""
    reject: bool
    """Whether the test rejects, i.e. ``p_value < alpha``."""
    alpha: float
    """Significance level the test was run at."""
    att_all: float
    """Overall ATT estimated under PT-All."""
    att_post: float
    """Overall ATT estimated under PT-Post."""
    recommendation: str
    """``"pt_all"`` if fail to reject, ``"pt_post"`` if reject, ``"inconclusive"`` if test unavailable."""
    gt_details: Optional[pd.DataFrame] = None
    """Per-event-study-horizon details: relative_period, es_all, es_post, delta."""

    def __repr__(self) -> str:
        # Compact one-line summary; mirrors the key test quantities.
        template = "HausmanPretestResult(H={:.3f}, p={:.4f}, df={}, recommend={})"
        return template.format(self.statistic, self.p_value, self.df, self.recommendation)
@dataclass
class EfficientDiDResults:
    """
    Results from Efficient DiD (Chen, Sant'Anna & Xie 2025) estimation.

    Stores group-time ATT(g,t) estimates with efficient weights, plus
    optional aggregations (overall ATT, event study, group effects).

    Attributes
    ----------
    group_time_effects : dict
        ``{(g, t): {'effect', 'se', 't_stat', 'p_value', 'conf_int',
        'n_treated', 'n_control'}}``
    overall_att : float
        Overall ATT (cohort-size weighted average of post-treatment
        group-time effects, matching CallawaySantAnna convention).
    overall_se : float
        Standard error of overall ATT.
    overall_t_stat : float
        t-statistic for overall ATT.
    overall_p_value : float
        p-value for overall ATT.
    overall_conf_int : tuple
        Confidence interval for overall ATT.
    groups : list
        Treatment cohort identifiers.
    time_periods : list
        All time periods.
    n_obs : int
        Total observations (units x periods).
    n_treated_units : int
        Number of ever-treated units.
    n_control_units : int
        Number of never-treated units.
    alpha : float
        Significance level.
    pt_assumption : str
        ``"all"`` or ``"post"``.
    anticipation : int
        Number of anticipation periods used.
    n_bootstrap : int
        Number of bootstrap iterations (0 = analytical only).
    bootstrap_weights : str
        Bootstrap weight distribution (``"rademacher"``, ``"mammen"``, ``"webb"``).
    seed : int or None
        Random seed used for bootstrap.
    event_study_effects : dict, optional
        ``{relative_time: effect_dict}``
    group_effects : dict, optional
        ``{group: effect_dict}``
    efficient_weights : dict, optional
        ``{(g, t): ndarray}`` — diagnostic: weight vector per target.
    omega_condition_numbers : dict, optional
        ``{(g, t): float}`` — diagnostic: Omega* condition numbers.
    influence_functions : dict, optional
        ``{(g, t): ndarray(n_units,)}`` — per-unit EIF values for each
        group-time cell. Only populated when ``store_eif=True`` in
        :meth:`~EfficientDiD.fit` (used internally by ``hausman_pretest``).
    bootstrap_results : EDiDBootstrapResults, optional
        Bootstrap inference results.
    estimation_path : str
        ``"nocov"`` or ``"dr"`` — which estimation path was used.
    sieve_k_max : int or None
        Maximum polynomial degree for sieve ratio estimation.
    sieve_criterion : str
        Information criterion used (``"aic"`` or ``"bic"``).
    ratio_clip : float
        Clipping bound for sieve propensity ratios.
    kernel_bandwidth : float or None
        Bandwidth used for kernel-smoothed conditional Omega*.
    """

    group_time_effects: Dict[Tuple[Any, Any], Dict[str, Any]]
    overall_att: float
    overall_se: float
    overall_t_stat: float
    overall_p_value: float
    overall_conf_int: Tuple[float, float]
    groups: List[Any]
    time_periods: List[Any]
    n_obs: int
    n_treated_units: int
    n_control_units: int
    alpha: float = 0.05
    pt_assumption: str = "all"
    anticipation: int = 0
    n_bootstrap: int = 0
    bootstrap_weights: str = "rademacher"
    seed: Optional[int] = None
    event_study_effects: Optional[Dict[int, Dict[str, Any]]] = None
    group_effects: Optional[Dict[Any, Dict[str, Any]]] = None
    efficient_weights: Optional[Dict[Tuple[Any, Any], "np.ndarray"]] = field(
        default=None, repr=False
    )
    omega_condition_numbers: Optional[Dict[Tuple[Any, Any], float]] = field(
        default=None, repr=False
    )
    control_group: str = "never_treated"
    influence_functions: Optional[Dict[Tuple[Any, Any], "np.ndarray"]] = field(
        default=None, repr=False
    )
    bootstrap_results: Optional["EDiDBootstrapResults"] = field(default=None, repr=False)
    estimation_path: str = "nocov"
    sieve_k_max: Optional[int] = None
    sieve_criterion: str = "bic"
    ratio_clip: float = 20.0
    kernel_bandwidth: Optional[float] = None
    # Survey design metadata (SurveyMetadata instance from diff_diff.survey)
    survey_metadata: Optional[Any] = None

    def __repr__(self) -> str:
        sig = _get_significance_stars(self.overall_p_value)
        path = "DR" if self.estimation_path == "dr" else "nocov"
        return (
            f"EfficientDiDResults(ATT={self.overall_att:.4f}{sig}, "
            f"SE={self.overall_se:.4f}, "
            f"pt={self.pt_assumption}, path={path}, "
            f"n_groups={len(self.groups)}, "
            f"n_periods={len(self.time_periods)})"
        )

    @property
    def coef_var(self) -> float:
        """Coefficient of variation: SE / |overall ATT|. NaN when ATT is 0 or SE non-finite."""
        if not (np.isfinite(self.overall_se) and self.overall_se >= 0):
            return np.nan
        if not np.isfinite(self.overall_att) or self.overall_att == 0:
            return np.nan
        return self.overall_se / abs(self.overall_att)

    def summary(self, alpha: Optional[float] = None) -> str:
        """Generate formatted summary of estimation results.

        Parameters
        ----------
        alpha : float, optional
            Significance level used for the confidence-interval label.
            Defaults to the level stored on the results object.
        """
        # BUGFIX: `alpha or self.alpha` silently discarded a falsy explicit
        # value (e.g. 0.0); compare against None instead.
        alpha = self.alpha if alpha is None else alpha
        conf_level = int((1 - alpha) * 100)
        lines = [
            "=" * 85,
            "Efficient DiD (Chen-Sant'Anna-Xie 2025) Results".center(85),
            "=" * 85,
            "",
            f"{'Total observations:':<30} {self.n_obs:>10}",
            f"{'Treated units:':<30} {self.n_treated_units:>10}",
            f"{'Control units:':<30} {self.n_control_units:>10}",
            f"{'Treatment cohorts:':<30} {len(self.groups):>10}",
            f"{'Time periods:':<30} {len(self.time_periods):>10}",
            f"{'PT assumption:':<30} {self.pt_assumption:>10}",
            f"{'Estimation path:':<30} {'doubly robust' if self.estimation_path == 'dr' else 'no covariates':>10}",
        ]
        # Optional configuration lines — only shown when they differ from defaults.
        if self.control_group != "never_treated":
            lines.append(f"{'Control group:':<30} {self.control_group:>10}")
        if self.anticipation > 0:
            lines.append(f"{'Anticipation periods:':<30} {self.anticipation:>10}")
        if self.n_bootstrap > 0:
            lines.append(f"{'Bootstrap:':<30} {self.n_bootstrap:>10} ({self.bootstrap_weights})")
        lines.append("")
        # Add survey design info
        if self.survey_metadata is not None:
            sm = self.survey_metadata
            lines.extend(_format_survey_block(sm, 85))
        # Overall ATT
        lines.extend(
            [
                "-" * 85,
                "Overall Average Treatment Effect on the Treated".center(85),
                "-" * 85,
                f"{'Parameter':<15} {'Estimate':>12} {'Std. Err.':>12} "
                f"{'t-stat':>10} {'P>|t|':>10} {'Sig.':>6}",
                "-" * 85,
                f"{'ATT':<15} {self.overall_att:>12.4f} {self.overall_se:>12.4f} "
                f"{self.overall_t_stat:>10.3f} {self.overall_p_value:>10.4f} "
                f"{_get_significance_stars(self.overall_p_value):>6}",
                "-" * 85,
                "",
                f"{conf_level}% Confidence Interval: "
                f"[{self.overall_conf_int[0]:.4f}, {self.overall_conf_int[1]:.4f}]",
            ]
        )
        cv = self.coef_var
        if np.isfinite(cv):
            lines.append(f"{'CV (SE/|ATT|):':<25} {cv:>10.4f}")
        lines.append("")
        # Event study effects
        if self.event_study_effects:
            lines.extend(
                [
                    "-" * 85,
                    "Event Study (Dynamic) Effects".center(85),
                    "-" * 85,
                    f"{'Rel. Period':<15} {'Estimate':>12} {'Std. Err.':>12} "
                    f"{'t-stat':>10} {'P>|t|':>10} {'Sig.':>6}",
                    "-" * 85,
                ]
            )
            for rel_t in sorted(self.event_study_effects.keys()):
                eff = self.event_study_effects[rel_t]
                sig = _get_significance_stars(eff["p_value"])
                lines.append(
                    f"{rel_t:<15} {eff['effect']:>12.4f} {eff['se']:>12.4f} "
                    f"{eff['t_stat']:>10.3f} {eff['p_value']:>10.4f} {sig:>6}"
                )
            lines.extend(["-" * 85, ""])
        # Group effects
        if self.group_effects:
            lines.extend(
                [
                    "-" * 85,
                    "Effects by Treatment Cohort".center(85),
                    "-" * 85,
                    f"{'Cohort':<15} {'Estimate':>12} {'Std. Err.':>12} "
                    f"{'t-stat':>10} {'P>|t|':>10} {'Sig.':>6}",
                    "-" * 85,
                ]
            )
            for group in sorted(self.group_effects.keys()):
                eff = self.group_effects[group]
                sig = _get_significance_stars(eff["p_value"])
                lines.append(
                    f"{group:<15} {eff['effect']:>12.4f} {eff['se']:>12.4f} "
                    f"{eff['t_stat']:>10.3f} {eff['p_value']:>10.4f} {sig:>6}"
                )
            lines.extend(["-" * 85, ""])
        lines.extend(
            [
                "Signif. codes: '***' 0.001, '**' 0.01, '*' 0.05, '.' 0.1",
                "=" * 85,
            ]
        )
        return "\n".join(lines)

    def print_summary(self, alpha: Optional[float] = None) -> None:
        """Print summary to stdout."""
        print(self.summary(alpha))

    def to_dataframe(self, level: str = "group_time") -> pd.DataFrame:
        """Convert results to DataFrame.

        Parameters
        ----------
        level : str
            ``"group_time"``, ``"event_study"``, or ``"group"``.

        Raises
        ------
        ValueError
            If ``level`` is unknown, or if the requested aggregation
            was not computed during estimation.
        """
        if level == "group_time":
            rows = []
            for (g, t), data in self.group_time_effects.items():
                rows.append(
                    {
                        "group": g,
                        "time": t,
                        "effect": data["effect"],
                        "se": data["se"],
                        "t_stat": data["t_stat"],
                        "p_value": data["p_value"],
                        "conf_int_lower": data["conf_int"][0],
                        "conf_int_upper": data["conf_int"][1],
                    }
                )
            return pd.DataFrame(rows)
        elif level == "event_study":
            if self.event_study_effects is None:
                raise ValueError("Event study effects not computed. Use aggregate='event_study'.")
            rows = []
            for rel_t, data in sorted(self.event_study_effects.items()):
                rows.append(
                    {
                        "relative_period": rel_t,
                        "effect": data["effect"],
                        "se": data["se"],
                        "t_stat": data["t_stat"],
                        "p_value": data["p_value"],
                        "conf_int_lower": data["conf_int"][0],
                        "conf_int_upper": data["conf_int"][1],
                    }
                )
            return pd.DataFrame(rows)
        elif level == "group":
            if self.group_effects is None:
                raise ValueError("Group effects not computed. Use aggregate='group'.")
            rows = []
            for group, data in sorted(self.group_effects.items()):
                rows.append(
                    {
                        "group": group,
                        "effect": data["effect"],
                        "se": data["se"],
                        "t_stat": data["t_stat"],
                        "p_value": data["p_value"],
                        "conf_int_lower": data["conf_int"][0],
                        "conf_int_upper": data["conf_int"][1],
                    }
                )
            return pd.DataFrame(rows)
        else:
            raise ValueError(
                f"Unknown level: {level}. " "Use 'group_time', 'event_study', or 'group'."
            )

    @property
    def is_significant(self) -> bool:
        """Check if overall ATT is significant."""
        return bool(self.overall_p_value < self.alpha)

    @property
    def significance_stars(self) -> str:
        """Significance stars for overall ATT."""
        return _get_significance_stars(self.overall_p_value)