FATE
FATE copied to clipboard
The job fails with the error: "apply for job 202308141030506181140 resource failed"
# Global FATE Flow switches and hook configuration.
use_registry: false
use_deserialize_safe_module: false
dependent_distribution: true
encrypt_password: false
encrypt_module: fate_arch.common.encrypt_utils#pwdecrypt
private_key:
private_key_file:
party_id:
# Hook implementations used by FATE Flow; empty values fall back to defaults.
hook_module:
  client_authentication: fate_flow.hook.flow.client_authentication
  site_authentication: fate_flow.hook.flow.site_authentication
  permission: fate_flow.hook.flow.permission
hook_server_name:
# Client/site authentication switches (disabled here) and permission checks.
authentication:
  client:
    switch: false
    http_app_key:
    http_secret_key:
  site:
    switch: false
permission:
  switch: false
  component: false
  dataset: false
# FATE Flow server endpoints and coordination-proxy selection.
fateflow:
  # you must set real ip address, 127.0.0.1 and 0.0.0.0 is not supported
  host: 10.2.103.192
  http_port: 9380
  grpc_port: 9360
  # when you have multiple fateflow server on one party,
  # we suggest using nginx for load balancing.
  nginx:
    host:
    http_port:
    grpc_port:
  # use random instance_id instead of {host}:{http_port}
  random_instance_id: false
  # support rollsite/nginx/fateflow as a coordination proxy
  # rollsite support fate on eggroll, use grpc protocol
  # nginx support fate on eggroll and fate on spark, use http or grpc protocol, default is http
  # fateflow support fate on eggroll and fate on spark, use http protocol, but not support exchange network mode
  # format(proxy: rollsite) means rollsite use the rollsite configuration of fate_one_eggroll and nginx use the nginx configuration of fate_one_spark
  # you also can customize the config like this(set fateflow of the opposite party as proxy):
  # proxy:
  #   name: fateflow
  #   host: xx
  #   http_port: xx
  #   grpc_port: xx
  proxy: nginx
  # support default/http/grpc
  protocol: default
# MySQL backing store for FATE Flow metadata.
database:
  name: fate_flow
  user: fate
  passwd: fate_dev
  host: 10.2.103.192
  port: 3306
  max_connections: 100
  stale_timeout: 30
# ZooKeeper registry (only consulted when use_registry is true).
zookeeper:
  hosts:
    - 127.0.0.1:2181
  use_acl: false
  user: fate
  password: fate
# engine services
# NOTE(review): federation=pulsar and storage=hdfs are FATE-on-Spark engines,
# but computing is still "standalone" — for a fate_on_spark deployment this
# likely should be "spark"; this mismatch may explain resource-apply failures.
# Confirm the intended computing engine before changing.
default_engines:
  computing: standalone
  federation: pulsar
  storage: hdfs
# Resource sizing for the standalone (single-node) computing engine.
fate_on_standalone:
  standalone:
    cores_per_node: 3
    nodes: 1
# FATE-on-Spark engine endpoints: Spark/Linkis compute, Hive/HDFS storage,
# Pulsar federation, and the cross-party nginx proxy.
fate_on_spark:
  spark:
    # default use SPARK_HOME environment variable
    home: /data/projects/fate/common/spark
    cores_per_node: 4
    nodes: 3
  linkis_spark:
    cores_per_node: 3
    nodes: 2
    host: 10.2.103.192
    port: 9001
    token_code: MLSS
    python_path: /data/projects/fate/python
  hive:
    host: 127.0.0.1
    port: 10000
    auth_mechanism:
    username:
    password:
  linkis_hive:
    host: 127.0.0.1
    port: 9001
  hdfs:
    name_node: hdfs://fate-cluster
    # default /
    path_prefix:
  pulsar:
    host: 10.2.103.192
    port: 6650
    mng_port: 8080
    cluster: standalone
    # all parties should use a same tenant
    tenant: fl-tenant
    # message ttl in minutes
    topic_ttl: 5
    # default conf/pulsar_route_table.yaml
    route_table:
    # mode: replication / client, default: replication
    mode: replication
    max_message_size: 1048576
  nginx:
    host: 10.2.103.192
    http_port: 9300
    grpc_port: 9310
# external services
fateboard:
  host: 10.2.103.192
  port: 6060
# Model store configuration (currently disabled; MySQL selected as the engine).
enable_model_store: false
model_store_address:
  # use mysql as the model store engine
  # storage: mysql
  # database: fate_model
  # user: fate
  # password: fate
  # host: 127.0.0.1
  # port: 3306
  # other optional configs send to the engine
  # max_connections: 10
  # stale_timeout: 10
  # use tencent cos as model store engine
  storage: mysql
  name: fate_flow
  user: fate
  passwd: fate_dev
  host: 127.0.0.1
  port: 3306
  max_connections: 10
  stale_timeout: 10
  # NOTE(review): the four keys below belong to the tencent_cos store template;
  # presumably ignored when storage is mysql — confirm and remove if unused.
  Region:
  SecretId:
  SecretKey:
  Bucket:
# FATE Serving endpoints and the FATE Manager registration service.
servings:
  hosts:
    - 10.2.103.192:8000
fatemanager:
  host: 10.2.103.192
  port: 8001
  federatedId: 0
Hadoop resources:
In single-sided testing, machine 10.2.103.191 runs jobs successfully, but after switching the computing engine to Spark, machine 10.2.103.192 fails with the problem shown above. Running a standalone Spark pi-calculation job succeeds on both machines. The cluster was deployed following the FATE-on-Spark deployment guide.