hlo_module.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_MODULE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_MODULE_H_

#include <atomic>
#include <list>
#include <memory>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

#include "tensorflow/compiler/xla/iterator_util.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module_config.h"
#include "tensorflow/compiler/xla/service/name_uniquer.h"
#include "tensorflow/compiler/xla/service/versioned_computation_handle.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/iterator_range.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"

namespace xla {

// Describes a compilation unit at the HLO level.
//
// An HloModule owns a set of HloComputations, exactly one of which is the
// "entry" computation that produces the module's result; the others are
// embedded computations, e.g. computations called by kCall, kWhile, kMap, or
// fusion instructions.
class HloModule {
 public:
  HloModule(const string& name,
            const VersionedComputationHandle& entry_computation_handle,
            const HloModuleConfig& config);

  // Constructor without a versioned computation handle. This constructor
  // should only be used for HloModules used outside of the XLA service (e.g.
  // tests). The versioned handle is used by the service in the compilation
  // cache. A default configuration is created for this module.
  explicit HloModule(const string& name);
  explicit HloModule(const string& name, const HloModuleConfig& config);

  // Adds an entry computation to the module. A module can only have one entry
  // computation. Returns a pointer to the newly added computation.
  HloComputation* AddEntryComputation(
      std::unique_ptr<HloComputation> computation);
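
  // A rough usage sketch (not part of the original header): building a tiny
  // module whose entry computation negates a scalar parameter. ShapeUtil,
  // HloOpcode, and MakeUnique come from elsewhere in the XLA codebase and are
  // assumed here; this is illustrative, not the canonical construction path.
  //
  //   auto module = MakeUnique<HloModule>("example_module");
  //   HloComputation::Builder builder("example_entry");
  //   HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
  //       /*parameter_number=*/0, ShapeUtil::MakeShape(F32, {}), "x"));
  //   builder.AddInstruction(
  //       HloInstruction::CreateUnary(x->shape(), HloOpcode::kNegate, x));
  //   HloComputation* entry = module->AddEntryComputation(builder.Build());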

  // Adds an embedded computation to the module.
  HloComputation* AddEmbeddedComputation(
      std::unique_ptr<HloComputation> computation);

  // Removes an embedded computation.
  Status RemoveEmbeddedComputation(HloComputation* to_remove);

  // Replaces all uses of computations that are keys of 'replacements' with
  // the corresponding values in 'replacements'. Replaces the entry
  // computation, if applicable.
  //
  // This function iterates over all instructions in the module to find
  // computations to replace. We could speed it up by keeping track of users
  // of computations.
  void ReplaceComputations(
      const std::unordered_map<HloComputation*, HloComputation*>& replacements);
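
  // A rough usage sketch (assumed names): swapping out one embedded
  // computation for another that has already been added to this module, e.g.
  // after rewriting a reduction body.
  //
  //   std::unordered_map<HloComputation*, HloComputation*> replacements;
  //   replacements[old_reduce_body] = new_reduce_body;
  //   module->ReplaceComputations(replacements);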

  const string& name() const { return name_; }

  // Returns a deep copy of this module including all computations.
  std::unique_ptr<HloModule> Clone(const string& suffix = "clone") const;

  // Performs a deep clone of the computation, by recursively cloning all
  // the called computations as well.
  HloComputation* DeepCloneComputation(HloComputation* computation);

  // Returns a pointer to the entry computation of the module.
  const HloComputation* entry_computation() const {
    CHECK_NE(nullptr, entry_computation_);
    return entry_computation_;
  }
  HloComputation* entry_computation() {
    CHECK_NE(nullptr, entry_computation_);
    return entry_computation_;
  }

  ComputationLayout* mutable_entry_computation_layout() {
    return config_.mutable_entry_computation_layout();
  }

  const ComputationLayout& entry_computation_layout() const {
    return config_.entry_computation_layout();
  }

  const VersionedComputationHandle& entry_computation_handle() const {
    return entry_computation_handle_;
  }

  // Gets the computations in this module.
  //
  // Returns a view of HloComputation*s, so you can iterate over this in the
  // natural way:
  //
  //   for (HloComputation* c : module->computations()) { ... }
  //
  tensorflow::gtl::iterator_range<UnwrappingIterator<
      std::vector<std::unique_ptr<HloComputation>>::const_iterator>>
  computations() const {
    return {MakeUnwrappingIterator(computations_.begin()),
            MakeUnwrappingIterator(computations_.end())};
  }
  tensorflow::gtl::iterator_range<UnwrappingIterator<
      std::vector<std::unique_ptr<HloComputation>>::iterator>>
  computations() {
    return {MakeUnwrappingIterator(computations_.begin()),
            MakeUnwrappingIterator(computations_.end())};
  }

  // Gets the number of computations in this module.
  int64 computation_count() const { return computations_.size(); }

  // Gets the number of instructions in this module.
  int64 instruction_count() const;

  // Computes and returns a post order of all computations in the module. The
  // order is defined such that if computation A has an instruction which
  // calls computation B, then A appears after B.
  std::list<HloComputation*> MakeComputationPostOrder() const;

  // Gets the computations in this module which are not fusion computations
  // (i.e. computations that do not serve as the body of a fusion
  // instruction).
  std::vector<HloComputation*> MakeNonfusionComputations() const;

  const HloModuleConfig& config() const { return config_; }

  // Returns a string representation of the module.
  //
  // (We express the default options using an overload rather than a default
  // param because gdb ignores default params, but does resolve overloads.)
  string ToString() const { return ToString(HloPrintOptions()); }
  string ToString(const HloPrintOptions& options) const;

  // Converts an HloModule to or from a proto.
  HloModuleProto ToProto() const;
  static StatusOr<std::unique_ptr<HloModule>> CreateFromProto(
      const HloModuleProto& proto, const HloModuleConfig& module_config,
      const VersionedComputationHandle& entry_computation_handle =
          VersionedComputationHandle());
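
  // A rough round-trip sketch (assumed context: this is inside a function
  // returning a Status/StatusOr, and `debug_options` is a DebugOptions
  // instance): serialize a module, rebuild a config from the proto's program
  // shape, then reconstruct the module.
  //
  //   HloModuleProto proto = module->ToProto();
  //   TF_ASSIGN_OR_RETURN(HloModuleConfig config,
  //                       HloModule::CreateModuleConfigFromProto(proto,
  //                                                              debug_options));
  //   TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> restored,
  //                       HloModule::CreateFromProto(proto, config));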

  // Creates and returns an HloModuleConfig with an appropriate program shape
  // for the HLO module in the given proto.
  static StatusOr<HloModuleConfig> CreateModuleConfigFromProto(
      const HloModuleProto& module, const DebugOptions& debug_options);

  // Outlines the given expression from the given computation.
  // instructions_to_outline contains the instructions that form the
  // expression.
  //
  // Precondition: instructions in instructions_to_outline are in topological
  // order (root of outlined instructions last). TODO(jingyue): take a set of
  // instructions and topologically sort them.
  HloInstruction* OutlineExpressionFromComputation(
      tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_outline,
      const string& outlined_computation_name, HloComputation* computation);
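
  // A rough usage sketch (assumed names): `add` and `mul` are HloInstruction*
  // already inside `comp`, with `add` feeding `mul` (so the slice below is in
  // topological order). The returned instruction is the call to the newly
  // outlined computation.
  //
  //   HloInstruction* call = module->OutlineExpressionFromComputation(
  //       {add, mul}, "outlined_add_mul", comp);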

  // Returns a randomly generated uint64.
  uint64 RandomNew64() const;

  // Returns the NameUniquer for uniquing instruction names in this module.
  NameUniquer& instruction_name_uniquer() { return instruction_name_uniquer_; }

  // Assigns a new unique dense id for an instruction.
  int NewUniqueInstructionId() {
    int result = next_unique_id_;
    next_unique_id_++;
    return result;
  }

  // Returns the number of unique instruction ids given out. All ids up to
  // this point are guaranteed to be in the range [0..NumUniqueInstructionIds()).
  int NumUniqueInstructionIds() const { return next_unique_id_; }

  // Returns an id that is unique to this module across all modules created
  // over the lifetime of this process.
  int unique_id() const { return unique_id_; }

 private:
  HloComputation* AddComputationInternal(
      std::unique_ptr<HloComputation> computation, bool is_entry,
      bool uniquify_names);

  const string name_;
  HloModuleConfig config_;
  HloComputation* entry_computation_ = nullptr;
  std::vector<std::unique_ptr<HloComputation>> computations_;

  // Random number generator engine to use when generating random numbers per
  // HloModule compilation.
  // TODO(b/25995601): Replace with better seed setting or /dev/random where
  // deterministic execution is not needed.
  mutable std::mt19937_64 rng_{42};
  mutable tensorflow::mutex rng_mutex_;

  // Versioned handle of the entry computation of the module.
  bool has_entry_computation_handle_ = false;
  VersionedComputationHandle entry_computation_handle_;

  // Unique name generators for computation and instruction names, which are
  // unique per module.
  NameUniquer computation_name_uniquer_{/*separator=*/"."};
  NameUniquer instruction_name_uniquer_{/*separator=*/"."};
  int next_unique_id_ = 0;

  // Used to keep track of the next unique module id that should be assigned.
  static std::atomic<int> next_unique_module_id_;
  // A unique id to label modules with.
  int unique_id_;
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_MODULE_H_