1/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15
#include <memory>
#include <string>

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h" // from @llvm-project
#include "mlir/IR/Attributes.h" // from @llvm-project
#include "mlir/IR/Builders.h" // from @llvm-project
#include "mlir/IR/Operation.h" // from @llvm-project
#include "mlir/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/xla/client/sharding_builder.h"
#include "tensorflow/dtensor/mlir/dtensor_mlir_passes.h"
#include "tensorflow/dtensor/mlir/ir/tf_dtensor.h"
28
29namespace tensorflow {
30namespace dtensor {
31
32namespace {
33#define GEN_PASS_DEF_DTENSORSETDEFAULTSHARDING
34#include "tensorflow/dtensor/mlir/dtensor_passes.h.inc"
35
36// Assigns inputs/outputs for TPU computation to logical core 0.
37void SetDefaultSharding(mlir::tf_device::ClusterFuncOp cluster,
38 mlir::OpBuilder* builder) {
39 const std::string logical_core_0_sharding =
40 xla::sharding_builder::AssignDevice(0).SerializeAsString();
41
42 llvm::SmallVector<llvm::StringRef, 4> input_sharding(cluster.getNumOperands(),
43 logical_core_0_sharding);
44 llvm::SmallVector<llvm::StringRef, 4> output_sharding(
45 cluster.getNumResults(), logical_core_0_sharding);
46
47 cluster->setAttr("input_sharding_configuration",
48 builder->getStrArrayAttr(input_sharding));
49 cluster->setAttr("output_sharding_configuration",
50 builder->getStrArrayAttr(output_sharding));
51}
52
53// MLIR pass that sets xla sharding of TPU computation input/outputs to
54// maximally assigned to logical core 0.
55struct DTensorSetDefaultSharding
56 : public impl::DTensorSetDefaultShardingBase<DTensorSetDefaultSharding> {
57 void runOnOperation() override {
58 mlir::MLIRContext& context = getContext();
59 mlir::OpBuilder builder(&context);
60
61 getOperation().walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
62 // Skip non-tpu device cluster_func.
63 auto replicate_attr =
64 cluster_func->getAttrOfType<mlir::StringAttr>("_tpu_replicate");
65 if (!replicate_attr) return;
66
67 SetDefaultSharding(cluster_func, &builder);
68 });
69 }
70};
71
72} // namespace
73
74std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
75CreateDTensorSetDefaultSharding() {
76 return std::make_unique<DTensorSetDefaultSharding>();
77}
78
79} // namespace dtensor
80} // namespace tensorflow
81