// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/DISKinematics.hh"
#include "Rivet/Projections/UnstableParticles.hh"
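
// One typical workflow for using this plugin (file names are illustrative, not
// part of the analysis itself): compile with rivet-build and run over a HepMC
// event file with the rivet CLI, e.g.
//   rivet-build RivetH1_1996_I421105.so H1_1996_I421105.cc
//   rivet --pwd -a H1_1996_I421105 events.hepmc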

namespace Rivet {


  /// @brief Inclusive D0 and D*+- production in deep inelastic e p scattering at HERA (H1)
  class H1_1996_I421105 : public Analysis {
  public:

    /// Constructor
    RIVET_DEFAULT_ANALYSIS_CTOR(H1_1996_I421105);


    /// @name Analysis methods
    ///@{

    /// Book histograms and initialise projections before the run
    void init() {

      // Initialise and register projections
      declare(DISKinematics(), "Kinematics");
      declare(UnstableParticles(), "UFS");

      // The basic final-state projection:
      // all final-state particles within the given eta acceptance
      const FinalState fs(Cuts::abseta < 4.9);
      declare(fs, "FS");

      // Book histograms, taking the binning from the reference data
      // via the HEPData ID (digits in "d01-x01-y01" etc.)
      book(_h["p_tD*_norm"], 4, 1, 1);
      book(_h["p_tD*"],      4, 1, 2);
      book(_h["p_tD0_norm"], 5, 1, 1);
      book(_h["p_tD0"],      5, 1, 2);
      book(_h["xD_D*_norm"], 6, 1, 1);
      book(_h["xD_D*"],      6, 1, 2);
      book(_h["xD_D0_norm"], 7, 1, 1);
      book(_h["xD_D0"],      7, 1, 2);
    }


    /// Perform the per-event analysis
    void analyze(const Event& event) {

      // Get the DIS kinematics
      const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
      const double y  = dk.y();
      const double W2 = dk.W2()/GeV2;
      const double Q2 = dk.Q2()/GeV2;

      // Restrict to the measured kinematic region: 10 < Q2 < 100 GeV2, 0.01 < y < 0.7
      const bool cut = Q2 > 10. && Q2 < 100. && y > 0.01 && y < 0.7;
      if (!cut) vetoEvent;

      // Boost into the hadronic (gamma*-proton) centre-of-mass frame
      const LorentzTransform hcmboost = dk.boostHCM();
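
      // For each charmed meson the scaled momentum x_D = 2|p(D)|/W is computed from
      // the D momentum in this hadronic centre-of-mass frame, with W = sqrt(W2) the
      // invariant mass of the hadronic final state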
      for (const Particle& p : apply<UnstableParticles>(event, "UFS").particles()) {
        const bool etacut = p.abseta() < 1.5;
        const FourMomentum hcmMom = hcmboost.transform(p.momentum());
        const double p_D = hcmMom.p3().mod()/GeV;
        const double x_D = 2.*p_D/sqrt(W2);
        if (p.abspid() == 421) {
          _h["p_tD0"]->fill(p.pT()/GeV);
          _h["p_tD0_norm"]->fill(p.pT()/GeV);
          if (etacut) _h["xD_D0"]->fill(x_D);
          if (etacut) _h["xD_D0_norm"]->fill(x_D);
        }
        if (p.abspid() == 413) {
          _h["p_tD*"]->fill(p.pT()/GeV);
          _h["p_tD*_norm"]->fill(p.pT()/GeV);
          // x_D is defined in the same way for the D* as for the D0
          if (etacut) _h["xD_D*"]->fill(x_D);
          if (etacut) _h["xD_D*_norm"]->fill(x_D);
        }
      }
    }


    /// Normalise histograms etc., after the run
    void finalize() {
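      // The unnormalised histograms are converted to cross sections in nb by scaling
      // with sigma / sum(weights); the "_norm" copies are normalised to unit area so
      // that only the shapes are compared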
      scale(_h["p_tD*"], crossSection()/nanobarn/sumW());
      scale(_h["p_tD0"], crossSection()/nanobarn/sumW());
      normalize(_h["p_tD*_norm"]);
      normalize(_h["p_tD0_norm"]);
      scale(_h["xD_D*"], crossSection()/nanobarn/sumW());
      scale(_h["xD_D0"], crossSection()/nanobarn/sumW());
      normalize(_h["xD_D*_norm"]);
      normalize(_h["xD_D0_norm"]);
    }

    ///@}


    /// @name Histograms
    ///@{
    map<string, Histo1DPtr> _h;
    ///@}

  };


  RIVET_DECLARE_PLUGIN(H1_1996_I421105);

}