1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
* This is a macro for testing General Linear Hypotheses
* of the type cb = d, where b is a vector of regression
* coefficients and c is a matrix of linear constraints
* (cf. Searle, 1971, Linear Models (Wiley)).
* The code for the example data-generation part comes from David Matheson.
* Author: Johannes Naumann (email=johannes.naumann AT uni-koeln.de)
* March 22, 2005 .

* Build 100 cases with three predictors x1, x2, x3 and one dependent variable y, all drawn from a standard normal distribution.

* Create 100 cases: predictors x1, x2, x3 and criterion y,
* each an independent draw from N(0,1), in the same draw order
* as the original DO REPEAT version.
new file.
input program.
loop id = 1 to 100.
numeric x1 to x3.
compute x1 = rv.normal(0,1).
compute x2 = rv.normal(0,1).
compute x3 = rv.normal(0,1).
compute y = rv.normal(0,1).
end case.
end loop.
end file.
end input program.
execute.

* Make x1 and x2 correlate .70 with y and .20 with each other, while x3 is unrelated to x1, x2, and y.

* Impose the target correlation structure on the raw scores by
* post-multiplying the data matrix with the Cholesky factor of
* the desired correlation matrix r.
matrix.
get z /variables = x1 x2 x3 y.
compute r = {1.0, 0.2, 0.0, 0.7;
             0.2, 1.0, 0.0, 0.7;
             0.0, 0.0, 1.0, 0.0;
             0.7, 0.7, 0.0, 1.0}.
compute fac = chol(r).
compute zc = z*fac.
save zc /outfile=* /variables= x1 to x3 y.
end matrix.

* Test the hypothesis that all regression coefficients are equal.

* Save the current settings and the active file so both can be restored
* after the macro has been defined and run.
preserve.
save outfile = 'alh__tmp.sav'.
set results listing printback on mprint off.
* Macro ALH_MAK: tests the general linear hypothesis c*b = d for an OLS
* regression of DEP on PRED (cf. Searle, 1971). Arguments, each ended by /:
*   dep  - name of the dependent variable.
*   pred - list of predictor variables.
*   cmat - constraint matrix c in MATRIX notation, one row per constraint
*          (columns: intercept first, then the predictors in PRED order).
*   dmat - right-hand-side column vector d in MATRIX notation.
* Results (SSQ, F value, df, p value) are saved to alh_erg.sav, labeled,
* and listed via REPORT; unconstrained and constrained coefficient
* vectors are printed from within the MATRIX block.
define alh_mak (dep = !charend('/')
               /pred = !charend('/')
               /cmat = !charend('/')
               /dmat = !charend('/')).
* Listwise deletion: keep only cases complete on all model variables.
count miss__ = !pred !dep (sysmis missing).
select if miss__ = 0.
* Constant column for the intercept term of the design matrix x.
compute eins = 1.
* Inside the MATRIX block:
*   b   = inv(t(x)*x)*t(x)*y, the unconstrained OLS estimate.
*   q   = hypothesis SSQ of c*b = d (numerator sum of squares).
*   s   = total SSQ about the mean, z = regression SSQ, e = s - z = error SSQ.
*   zdf = hypothesis df (rows of c), ndf = error df (n - number of columns of x).
*   f   = (q/zdf)/(e/ndf), the F statistic for the hypothesis.
*   br  = constrained estimate: b adjusted to satisfy c*br = d.
matrix.
get y /vars = !dep.
get x /vars = eins !pred.
compute b = (inv(t(x)*x))*t(x)*y.
compute c = !cmat.
compute d = !dmat.
compute q = t(c*b-d)*inv(c*inv(t(x)*x)*t(c))*(c*b-d).
compute s = t(y)*y-nrow(y)*((csum(y)/nrow(y))**2).
compute z = t(b)*t(x)*y-nrow(y)*((csum(y)/nrow(y))**2).
compute e = s-z.
compute zdf = nrow(c).
compute ndf = nrow(x)-ncol(x).
compute f = (q/zdf)/(e/ndf).
compute o = {q,e,f,zdf,ndf}.
comp xx = inv(t(x)*x).
save o /outfile = alh_erg.sav /variables = qs_h qs_e,f_wert,df_h,df_e.
compute br = b-xx*t(c)*inv(c*xx*t(c))*(c*b-d).
print b /title 'b-unconstrained'.
print br /title 'b-constrained'.
end matrix.
* Read the one-row results file back and attach the p value of the F test.
get file = alh_erg.sav.
compute p = 1-cdf.f(f_wert,df_h,df_e).
execute.
formats all (f11.5).
formats df_h df_e (f11.0).
variable labels qs_h 'SSQ - Hypothesis (Increment in error sum of squares)'
 /qs_e 'Error SSQ'
 /f_wert 'F-Value'
 /df_h 'df - Hypothesis'
 /df_e 'df - Error'
 /p 'p-Value'.
report
  /format=list automatic align(left)
  /variables= qs_h qs_e.
report
  /format=list automatic align(left)
  /variables= df_h df_e.
report
  /format=list automatic align(left)
  /variables= f_wert p.
* Reload the data file that was active when the macro was called.
get file = alh__tmp.sav.
!enddefine.
restore.
* Example call: test equality of the three slopes, b1 = b2 and b2 = b3
* (first column of cmat refers to the intercept).
alh_mak pred = x1 x2 x3/ dep = y/ cmat = {0,1,-1,0;0,0,1,-1}/ dmat = {0;0}/.