-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathio.py
More file actions
382 lines (321 loc) · 17.2 KB
/
io.py
File metadata and controls
382 lines (321 loc) · 17.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
# -*- coding: utf-8 -*-
"""io.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1JKqISS2b2G7M_GYGdGG-ztXPhaK0Fdfk
"""
# -*- coding: utf-8 -*-
"""Analyzing Cognitive Load and Task Performance with Markdown Standardization
This notebook investigates the impact of standardizing markdown language
on cognitive load and task performance when using different generative models.
It uses synthetic data to simulate a scenario where developers use different
LLMs with and without standardized markdown for their routines.
Workflow:
1. Data Loading and Validation: Load synthetic data and validate its structure.
2. Data Preprocessing: Scale numerical features.
3. Data Visualization: Generate KDE and Violin plots to compare distributions.
4. Statistical Analysis: Perform bootstrap analysis to calculate confidence intervals.
5. LLM Insights Report: Synthesize findings using simulated LLMs (Grok, Claude, Grok-Enhanced).
Keywords: Cognitive Load, Task Performance, Markdown Standardization, Generative Models, LLMs, Data Visualization, Statistical Analysis, Explainability
"""
import warnings
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from io import StringIO
# import plotly.express as px # For interactive plots - Removed as not used
from scipy.stats import bootstrap
# Suppress warnings (use with caution in production)
# FutureWarning noise typically comes from pandas/seaborn API deprecations.
warnings.filterwarnings("ignore", category=FutureWarning)
# NOTE(review): the plotly filter is retained although the plotly import was
# removed above — harmless, but could be dropped together with that comment.
warnings.filterwarnings("ignore", category=UserWarning, module="plotly")
# Google Colab environment check: mount Drive when available so outputs
# persist; otherwise fall back to a local output directory.
try:
    from google.colab import drive
    drive.mount("/content/drive")
    COLAB_ENV = True
except ImportError:
    COLAB_ENV = False
    print("Not running in Google Colab environment.")

# Constants
# Output directory depends on the runtime detected above.
OUTPUT_PATH = "./output_markdown_impact/" if not COLAB_ENV else "/content/drive/MyDrive/output_markdown_impact/"
# Column names of the synthetic dataset (see `synthetic_dataset` in __main__).
DEVELOPER_ID_COLUMN = "developer_id"
MODEL_TYPE_COLUMN = "model_type"  # e.g., "Model A", "Model B"
MARKDOWN_STANDARDIZED_COLUMN = "markdown_standardized"  # Boolean: True/False
COGNITIVE_LOAD_COLUMN = "cognitive_load"  # Measured on a scale (e.g., 0-10)
TASK_PERFORMANCE_COLUMN = "task_performance"  # Measured on a scale (e.g., 0-100)
# Identifiers for the simulated LLM back-ends (dispatched in analyze_text_with_llm).
MODEL_GROK_NAME = "grok-base"
MODEL_CLAUDE_NAME = "claude-3.7-sonnet"
MODEL_GROK_ENHANCED_NAME = "grok-enhanced"
LINE_WIDTH = 2.5  # line width shared by all plots
BOOTSTRAP_RESAMPLES = 500  # default resample count for scipy.stats.bootstrap
# Placeholder API Keys (Security Warning: never commit real credentials;
# these are inert placeholders and are never used by the code below).
GROK_API_KEY = "YOUR_GROK_API_KEY"  # Placeholder
CLAUDE_API_KEY = "YOUR_CLAUDE_API_KEY"  # Placeholder
# --- DDQN Agent Class ---
class DDQNAgent:
    """Toy Double-DQN agent kept as a demonstration stub.

    The "network" is just a dense (state_dim x action_dim) table of random
    Q-values; a real agent would replace it with trained function approximators.
    """

    def __init__(self, state_dim, action_dim):
        self.state_dim = state_dim
        self.action_dim = action_dim
        # Random Q-table stands in for an untrained online network; the
        # target network starts as an exact copy of it.
        self.q_network = np.random.rand(state_dim, action_dim)
        self.target_network = np.copy(self.q_network)

    def act(self, state, epsilon=0.01):
        """Select an action epsilon-greedily for the given state row."""
        explore = np.random.rand() < epsilon
        if explore:
            # Exploration: uniform random action.
            return np.random.choice(self.action_dim)
        # Exploitation: greedy action under the online Q-table.
        return np.argmax(self.q_network[state])

    def learn(self, batch, gamma=0.99, learning_rate=0.1):
        """Apply a simplified TD update per (state, action, reward, next_state)."""
        for s, a, r, s_next in batch:
            # Bootstrap the target from the (frozen) target network.
            target = r + gamma * np.max(self.target_network[s_next])
            predicted = self.q_network[s, a]
            self.q_network[s, a] += learning_rate * (target - predicted)

    def update_target_network(self):
        """Sync the target network to the current online Q-table."""
        self.target_network = np.copy(self.q_network)
# --- Helper Functions ---
def create_output_directory(path):
    """Ensure *path* exists (creating parents as needed).

    Returns True on success, False when the OS refuses to create it.
    """
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as e:
        print(f"Error creating output directory: {e}")
        return False
    return True
def load_data_from_synthetic_string(csv_string):
    """Parse an in-memory CSV string into a DataFrame.

    Returns the DataFrame, or None when parsing/loading fails (the error is
    printed rather than raised).
    """
    try:
        return pd.read_csv(StringIO(csv_string))
    except pd.errors.ParserError as e:
        print(f"Error parsing CSV data: {e}")
    except Exception as e:
        print(f"Error loading data: {e}")
    return None
def validate_dataframe(df, required_columns):
    """Validate structure and content of the loaded dataset.

    Checks, in order: non-None frame, presence of *required_columns*, unique
    developer IDs, numeric metric columns, and a 0/1-encoded standardization
    flag. Prints a specific error and returns False on the first failure.
    """
    if df is None:
        print("Error: DataFrame is None. Cannot validate.")
        return False
    missing_columns = [c for c in required_columns if c not in df.columns]
    if missing_columns:
        print(f"Error: Missing columns: {missing_columns}")
        return False
    if df[DEVELOPER_ID_COLUMN].duplicated().any():
        print("Error: Duplicate developer IDs found.")
        return False
    # Both measurement columns must be numeric for scaling and plotting.
    for col in (COGNITIVE_LOAD_COLUMN, TASK_PERFORMANCE_COLUMN):
        if not pd.api.types.is_numeric_dtype(df[col]):
            print(f"Error: {col} should be numeric.")
            return False
    # The standardization flag is encoded as 0/1 (False/True).
    if not df[MARKDOWN_STANDARDIZED_COLUMN].isin([0, 1]).all():
        print(f"Error: Invalid values in {MARKDOWN_STANDARDIZED_COLUMN}. Must be 0 or 1.")
        return False
    return True
def scale_data(df, columns):
    """Min-max scale *columns* of *df* in place to the [0, 1] range.

    Returns the (mutated) DataFrame, or None if scaling fails.
    """
    try:
        df[columns] = MinMaxScaler().fit_transform(df[columns])
    except Exception as e:
        print(f"Error during data scaling: {e}")
        return None
    return df
def analyze_text_with_llm(text, model_name):
    """Return a canned, keyword-driven response simulating an LLM call.

    Placeholder for real API calls: each supported model matches substrings
    of *text* (case-insensitively) and returns a fixed analysis string.
    """
    lowered = text.lower()
    if model_name == MODEL_GROK_NAME:
        if "cognitive load" in lowered:
            return "Grok-base: Analysis suggests that markdown standardization may influence cognitive load."
        if "task performance" in lowered:
            return "Grok-base: Initial analysis indicates a potential relationship between markdown standardization and task performance."
        return f"Grok-base: General analysis on '{text}'."
    if model_name == MODEL_CLAUDE_NAME:
        if "kde plot" in lowered:
            return "Claude 3.7: The KDE plot visually compares the distributions of cognitive load and task performance with and without markdown standardization."
        if "violin plot" in lowered:
            return "Claude 3.7: The violin plot shows the distribution of cognitive load and task performance across different model types and standardization statuses."
        return f"Claude 3.7: Enhanced analysis on '{text}'."
    if model_name == MODEL_GROK_ENHANCED_NAME:
        # The combined keyword case must be tested first, before the
        # individual plot keywords.
        if "cognitive load" in lowered and "task performance" in lowered:
            return "Grok-Enhanced: Analysis indicates that standardizing markdown usage leads to a statistically significant reduction in cognitive load and a corresponding increase in task performance across different generative models."
        if "kde plot" in lowered:
            return "Grok-Enhanced: The KDE plot clearly shows a shift towards lower cognitive load and higher task performance when markdown is standardized, suggesting improved efficiency."
        if "violin plot" in lowered:
            return "Grok-Enhanced: The violin plot reveals that the benefits of markdown standardization are consistent across different generative models, although the magnitude of the effect may vary."
        return f"Grok-Enhanced: In-depth analysis on '{text}'."
    return f"Model '{model_name}' not supported."
def create_kde_plot(df, standardized_col, non_standardized_col, output_path, colors=None):
    """Create and save a KDE plot comparing the standardized (flag == 1) and
    non-standardized (flag == 0) groups of MARKDOWN_STANDARDIZED_COLUMN.

    Args:
        df: DataFrame containing MARKDOWN_STANDARDIZED_COLUMN plus the metric
            columns being plotted.
        standardized_col: column plotted for the standardized group.
        non_standardized_col: column plotted for the non-standardized group.
        output_path: directory the PNG is written into.
        colors: optional (standardized_color, non_standardized_color) pair.
            Fix: this parameter was previously accepted but silently ignored;
            it now overrides the default cyan/magenta palette.

    Returns:
        A short description string on success, or an error message on failure.
    """
    try:
        # Honour a caller-supplied palette; fall back to the historical defaults.
        std_color, non_std_color = colors if colors is not None else ("#00FFFF", "#FF00FF")
        plt.figure(figsize=(10, 6))
        plt.style.use('dark_background')
        # Split rows by the 0/1 standardization flag.
        df_standardized = df[df[MARKDOWN_STANDARDIZED_COLUMN] == 1]
        df_non_standardized = df[df[MARKDOWN_STANDARDIZED_COLUMN] == 0]
        # One KDE curve per group.
        sns.kdeplot(data=df_standardized[standardized_col], color=std_color, label=f"{standardized_col.replace('_', ' ').capitalize()} (Standardized)", linewidth=LINE_WIDTH)
        sns.kdeplot(data=df_non_standardized[non_standardized_col], color=non_std_color, label=f"{non_standardized_col.replace('_', ' ').capitalize()} (Non-Standardized)", linewidth=LINE_WIDTH)
        plt.title(f'KDE Plot of {standardized_col.replace("_", " ").capitalize()} vs. {non_standardized_col.replace("_", " ").capitalize()}', color='white')
        plt.xlabel(standardized_col.replace("_", " ").capitalize())
        plt.ylabel("Density")
        plt.legend(facecolor='black', edgecolor='white', labelcolor='white')
        plt.savefig(os.path.join(output_path, f'kde_plot_{standardized_col}_{non_standardized_col}.png'))
        plt.close()
        return f"KDE plot comparing {standardized_col} and {non_standardized_col}."
    except Exception as e:
        print(f"Error creating KDE plot: {e}")
        return "Error creating KDE plot."
def create_violin_plot(df, x_column, y_column, output_path, title, filename):
    """Create, title, and save a violin plot of *y_column* grouped by *x_column*.

    Returns a short description string on success, or an error message.
    """
    try:
        plt.figure(figsize=(10, 6))
        plt.style.use('dark_background')
        sns.violinplot(data=df, x=x_column, y=y_column, linewidth=LINE_WIDTH)
        plt.title(title, color='white')
        # Write the figure next to the other analysis artefacts.
        plt.savefig(os.path.join(output_path, filename))
        plt.close()
        return f"Violin plot showing {y_column} across {x_column}."
    except Exception as e:
        print(f"Error creating violin plot: {e}")
        return "Error creating violin plot."
def perform_bootstrap(data, statistic, n_resamples=BOOTSTRAP_RESAMPLES):
    """Percentile-bootstrap confidence interval of *statistic* over *data*.

    Returns scipy's ConfidenceInterval on success, or (None, None) on error.
    """
    try:
        result = bootstrap(
            (data,),
            statistic,
            n_resamples=n_resamples,
            method='percentile',
            random_state=42,  # fixed seed keeps the CI reproducible run-to-run
        )
    except Exception as e:
        print(f"Error during bootstrap analysis: {e}")
        return (None, None)
    return result.confidence_interval
def save_summary(df, bootstrap_ci_load, bootstrap_ci_performance, output_path):
    """Write describe() statistics plus both bootstrap CIs to summary.txt.

    Returns the summary text on success, or a fixed error string on failure.
    """
    try:
        parts = [
            df.describe().to_string(),
            "",
            f"Bootstrap CI for Cognitive Load: {bootstrap_ci_load}",
            f"Bootstrap CI for Task Performance: {bootstrap_ci_performance}",
        ]
        summary_text = "\n".join(parts)
        with open(os.path.join(output_path, 'summary.txt'), 'w') as f:
            f.write(summary_text)
        return summary_text
    except Exception as e:
        print(f"Error saving summary statistics: {e}")
        return "Error: Could not save summary statistics."
def generate_insights_report(summary_stats_text, kde_plot_desc_load, kde_plot_desc_performance, violin_plot_desc, output_path):
    """Generates an insights report using (simulated) LLM calls.

    Feeds the summary statistics to the Grok-base stub, the plot descriptions
    to the Claude stub, and a fixed prompt to the Grok-Enhanced stub, then
    writes the combined text to <output_path>/insights.txt.

    Returns a success/error message string; errors are printed, not raised.
    """
    try:
        grok_insights = (
            analyze_text_with_llm(f"Analyze summary statistics:\n{summary_stats_text}", MODEL_GROK_NAME) + "\n\n"
        )
        claude_insights = (
            analyze_text_with_llm(f"Interpret KDE plot for cognitive load: {kde_plot_desc_load}", MODEL_CLAUDE_NAME) + "\n\n" +
            analyze_text_with_llm(f"Interpret KDE plot for task performance: {kde_plot_desc_performance}", MODEL_CLAUDE_NAME) + "\n\n" +
            analyze_text_with_llm(f"Interpret Violin plot: {violin_plot_desc}", MODEL_CLAUDE_NAME) + "\n\n"
        )
        # This prompt contains both "cognitive load" and "task performance",
        # so it triggers the stub's combined-keyword response.
        grok_enhanced_insights = analyze_text_with_llm(
            f"Provide enhanced insights on the impact of markdown standardization on cognitive load and task performance.",
            MODEL_GROK_ENHANCED_NAME
        )
        # The report body is kept at column 0 inside the literal so the
        # written file has no stray indentation.
        combined_insights = f"""
Combined Insights Report: Impact of Markdown Standardization on Cognitive Load and Task Performance
Grok-base Analysis:
{grok_insights}
Claude 3.7 Sonnet Analysis:
{claude_insights}
Grok-Enhanced Analysis:
{grok_enhanced_insights}
Synthesized Summary:
This report synthesizes insights from Grok-base, Claude 3.7 Sonnet, and Grok-Enhanced, focusing on the impact of markdown standardization on cognitive load and task performance when using different generative models. Grok-base provides a statistical overview, suggesting potential relationships between markdown standardization, cognitive load, and task performance. Claude 3.7 offers interpretations of the KDE and violin plots, highlighting the visual differences in distributions. Grok-Enhanced provides a more in-depth analysis, concluding that standardizing markdown usage leads to a statistically significant reduction in cognitive load and a corresponding increase in task performance across different generative models. The combined analyses suggest that consistent use of markdown can improve developer efficiency and reduce mental effort.
"""
        with open(os.path.join(output_path, 'insights.txt'), 'w') as f:
            f.write(combined_insights)
        print(f"Insights saved to: {os.path.join(output_path, 'insights.txt')}")
        return "Insights report generated successfully."
    except Exception as e:
        print(f"Error generating insights report: {e}")
        return "Error generating insights report."
# --- Main Script ---
if __name__ == "__main__":
    # Create output directory (Drive path under Colab, local path otherwise).
    if not create_output_directory(OUTPUT_PATH):
        exit()
    # Synthetic dataset: 20 developers, two model types, 0/1 standardization
    # flag. The CSV body sits at column 0 so pandas parses the headers cleanly.
    synthetic_dataset = """
developer_id,model_type,markdown_standardized,cognitive_load,task_performance
D001,Model A,1,4.5,78.2
D002,Model A,0,6.2,65.5
D003,Model B,1,3.8,85.1
D004,Model B,0,5.9,70.3
D005,Model A,1,4.1,81.5
D006,Model A,0,6.8,62.8
D007,Model B,1,3.5,88.0
D008,Model B,0,5.5,73.9
D009,Model A,1,4.3,79.7
D010,Model A,0,6.5,68.2
D011,Model B,1,3.2,90.4
D012,Model B,0,5.7,71.6
D013,Model A,1,4.0,82.9
D014,Model A,0,7.1,60.1
D015,Model B,1,3.9,86.5
D016,Model B,0,6.1,69.8
D017,Model A,1,4.7,76.5
D018,Model A,0,6.3,67.1
D019,Model B,1,3.3,89.2
D020,Model B,0,5.8,72.5
"""
    # Load and validate data; abort on any failure.
    df = load_data_from_synthetic_string(synthetic_dataset)
    if df is None:
        exit()
    required_columns = [DEVELOPER_ID_COLUMN, MODEL_TYPE_COLUMN, MARKDOWN_STANDARDIZED_COLUMN, COGNITIVE_LOAD_COLUMN, TASK_PERFORMANCE_COLUMN]
    if not validate_dataframe(df, required_columns):
        exit()
    # --- DDQN Agent Placeholder ---
    # Example state and action space (adapt to your needs)
    state_dim = 2  # Example: cognitive_load, task_performance
    action_dim = 3  # Example: standardize_markdown, dont_standardize, monitor_metrics
    agent = DDQNAgent(state_dim, action_dim)
    # Example usage (replace with actual environment interaction)
    sample_state = np.array([0.5, 0.7])  # Example state (scaled cognitive load and task performance)
    # NOTE(review): np.argmax(sample_state) yields an *index* (here 1), which
    # is then used as the agent's state row — looks unintended, but this is
    # acknowledged placeholder code; confirm before building on it.
    action = agent.act(np.argmax(sample_state))  # Get action for the state
    print(f"\nDDQN Agent Action (Placeholder): {action}")  # Output the action
    # --- Data Preprocessing ---
    # Scale both metric columns into [0, 1]; abort if scaling fails.
    df = scale_data(df, [COGNITIVE_LOAD_COLUMN, TASK_PERFORMANCE_COLUMN])
    if df is None:
        exit()
    # --- Visualizations ---
    # KDE plots: the function itself splits rows by the standardization flag,
    # so the same column name is passed for both groups.
    kde_plot_desc_load = create_kde_plot(df, COGNITIVE_LOAD_COLUMN, COGNITIVE_LOAD_COLUMN, OUTPUT_PATH)
    kde_plot_desc_performance = create_kde_plot(df, TASK_PERFORMANCE_COLUMN, TASK_PERFORMANCE_COLUMN, OUTPUT_PATH)
    # Violin plots for both metrics.
    violin_plot_desc = create_violin_plot(df, MARKDOWN_STANDARDIZED_COLUMN, COGNITIVE_LOAD_COLUMN, OUTPUT_PATH,
                                          "Cognitive Load by Markdown Standardization", "violin_cognitive_load.png")
    # NOTE(review): violin_plot_desc_perf is produced (and the PNG saved) but
    # its description is never forwarded to the insights report.
    violin_plot_desc_perf = create_violin_plot(df, MARKDOWN_STANDARDIZED_COLUMN, TASK_PERFORMANCE_COLUMN, OUTPUT_PATH,
                                               "Task Performance by Markdown Standardization", "violin_task_performance.png")
    # --- Statistical Analysis ---
    # Bootstrap CIs for the mean of each (scaled) metric.
    bootstrap_ci_load = perform_bootstrap(df[COGNITIVE_LOAD_COLUMN], np.mean)
    bootstrap_ci_performance = perform_bootstrap(df[TASK_PERFORMANCE_COLUMN], np.mean)
    # --- Save Summary ---
    summary_stats_text = save_summary(df, bootstrap_ci_load, bootstrap_ci_performance, OUTPUT_PATH)
    # --- Generate Insights Report ---
    generate_insights_report(summary_stats_text, kde_plot_desc_load, kde_plot_desc_performance, violin_plot_desc, OUTPUT_PATH)
    print("Execution completed successfully - Markdown Standardization Analysis Notebook.")