-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathquiz_2.R
More file actions
113 lines (87 loc) · 3.55 KB
/
quiz_2.R
File metadata and controls
113 lines (87 loc) · 3.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
# Setup -------------------------------------------------------------------
library(AppliedPredictiveModeling)
library(caret)
library(Hmisc)
# Question 1 --------------------------------------------------------------
# Split the Alzheimer data 50/50 into stratified training and test sets.
rm(list = ls())
data(AlzheimerDisease)

adData <- data.frame(diagnosis, predictors)

# createDataPartition() stratifies the split on the outcome;
# list = FALSE returns a matrix of row indices rather than a list.
trainIndex <- createDataPartition(diagnosis, p = 0.50, list = FALSE)
training   <- adData[trainIndex, ]
testing    <- adData[-trainIndex, ]
# Question 2 --------------------------------------------------------------
# Plot CompressiveStrength against row index, coloured by quartile bins of
# each candidate predictor, to look for a variable that explains the
# step-like pattern in the outcome.
rm(list = ls())
source('multiplot.R')
data(concrete)
set.seed(975)
inTrain  <- createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training <- mixtures[ inTrain, ]
testing  <- mixtures[-inTrain, ]

# All candidate predictors (everything except the outcome).
vars <- names(training)[!(names(training) %in% c('CompressiveStrength'))]

plots <- list()
tt <- training
# seq_len()/seq_along() instead of 1:nrow()/1:length(): the latter yield
# c(1, 0) when the input is empty, silently looping over bogus indices.
tt$index <- seq_len(nrow(tt))
for (i in seq_along(vars)) {
  vname <- sprintf('%s_Factor', vars[i])
  # cut2() bins the predictor into g = 4 quantile groups for colouring.
  tt[, vname] <- cut2(tt[, vars[i]], g = 4)
  # .data[[vname]] replaces the deprecated aes_string() for mapping a
  # column whose name is held in a variable.
  plots[[i]] <- ggplot(tt) +
    theme_bw() +
    geom_point(aes(x = index, y = CompressiveStrength, color = .data[[vname]]))
}
multiplot(plotlist = plots, cols = 3)
# There are no variables that are correlated with the compressive strength
featurePlot(x = training[,!(names(training) %in% c('CompressiveStrength'))], y = training$CompressiveStrength, plot = 'pairs')
# Question 3 --------------------------------------------------------------
# Examine the distribution of Superplasticizer to decide whether a log
# transform is appropriate.
rm(list = ls())
data(concrete)
set.seed(975)
inTrain  <- createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training <- mixtures[ inTrain, ]
testing  <- mixtures[-inTrain, ]

ggplot(training) +
  theme_bw() +
  geom_histogram(aes(x = Superplasticizer))

# Fraction of exact zeros in the training data.
sum(training$Superplasticizer == 0) / nrow(training)
# There are zero values here and therefore the log transform would yield -Inf values
# Question 4 --------------------------------------------------------------
# PCA on the IL_* predictors: how many components are needed to capture
# 80% of the variance?
rm(list = ls())
set.seed(3433)
data(AlzheimerDisease)
adData <- data.frame(diagnosis, predictors)
inTrain  <- createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training <- adData[ inTrain, ]
testing  <- adData[-inTrain, ]
# Extract all the IL variables
subtrain <- training[, substr(names(training), 1, 3) == 'IL_']

# thresh = 0.8 keeps enough principal components to explain 80% of variance.
r      <- preProcess(subtrain, method = "pca", thresh = 0.8)
rtrain <- predict(r, subtrain)
rtrain$diagnosis <- training$diagnosis

# BUG FIX: the formula must be `diagnosis ~ .`, not `rtrain$diagnosis ~ .`.
# With the latter, the response is the external vector rtrain$diagnosis,
# so `.` expands to EVERY column of `data` -- including diagnosis itself --
# and the outcome leaks into the predictors.
modelfit <- train(diagnosis ~ ., method = 'glm', data = rtrain)
summary(modelfit)
# The number of PCA components is 7 based on the output
# Question 5 --------------------------------------------------------------
# Compare out-of-sample accuracy of a glm on the raw IL_* predictors versus
# the same model fit on their PCA projection (80% variance retained).
rm(list = ls())
set.seed(3433)
data(AlzheimerDisease)
adData <- data.frame(diagnosis, predictors)
inTrain  <- createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training <- adData[ inTrain, ]
testing  <- adData[-inTrain, ]
# Extract all the IL variables
subtrain <- training[, substr(names(training), 1, 3) == 'IL_']
subtest  <- testing[, substr(names(testing), 1, 3) == 'IL_']

# PCA model: preProcess learned on training data only, then applied to both sets.
r      <- preProcess(subtrain, method = "pca", thresh = 0.8)
rtrain <- predict(r, subtrain)
rtrain$diagnosis <- training$diagnosis
# BUG FIX: use `diagnosis ~ .` rather than `rtrain$diagnosis ~ .`; the latter
# makes `.` expand to all columns of `data` INCLUDING diagnosis, leaking the
# outcome into the predictor set.
modelfit_PCA <- train(diagnosis ~ ., method = 'glm', data = rtrain)
summary(modelfit_PCA)

# Non-PCA model on the raw IL_* columns (same formula fix as above).
strain <- subtrain
strain$diagnosis <- training$diagnosis
modelfit_noPCA <- train(diagnosis ~ ., method = 'glm', data = strain)
summary(modelfit_noPCA)

# Score both models on the held-out test set.
test_pca   <- predict(r, subtest)
test_nopca <- subtest
test_pca$diagnosis   <- testing$diagnosis
test_nopca$diagnosis <- testing$diagnosis
test_pca_pred   <- predict(modelfit_PCA, test_pca)
test_nopca_pred <- predict(modelfit_noPCA, test_nopca)
confusionMatrix(test_pca_pred, testing$diagnosis)
confusionMatrix(test_nopca_pred, testing$diagnosis)
# Non-PCA Accuracy: 0.65
# PCA Accuracy: 0.72