DESIGNED FOR REAL-WORLD USE CASES

import json
import os
import sys
import time
from pathlib import Path
from typing import List, Optional

import torch
from fairscale.nn.model_parallel.initialize import (
    get_model_parallel_rank,
    initialize_model_parallel,
    model_parallel_is_initialized,
)

from llama.model import ModelArgs, Transformer
from llama.tokenizer import Tokenizer


class Llama:
    def __init__(self, model: Transformer, tokenizer: Tokenizer):
        self.model = model
        self.tokenizer = tokenizer

    @staticmethod
    def build(
        ckpt_dir: str,
        tokenizer_path: str,
        max_seq_len: int,
        max_batch_size: int,
        model_parallel_size: Optional[int] = None,
        seed: int = 1,
    ) -> "Llama":
        if not torch.distributed.is_initialized():
            torch.distributed.init_process_group("nccl")
        if not model_parallel_is_initialized():
            if model_parallel_size is None:
                model_parallel_size = int(os.environ.get("WORLD_SIZE", 1))
            initialize_model_parallel(model_parallel_size)

        local_rank = int(os.environ.get("LOCAL_RANK", 0))
        torch.cuda.set_device(local_rank)

        # seed must be the same in all processes
        torch.manual_seed(seed)

        if local_rank > 0:
            sys.stdout = open(os.devnull, "w")

        start_time = time.time()
        checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
        assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}"
        assert model_parallel_size == len(
            checkpoints
        ), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}"
        ckpt_path = checkpoints[get_model_parallel_rank()]
        checkpoint = torch.load(ckpt_path, map_location="cpu")
        with open(Path(ckpt_dir) / "params.json", "r") as f:
            params = json.loads(f.read())
        model_args: ModelArgs = ModelArgs(
            max_seq_len=max_seq_len,
            max_batch_size=max_batch_size,
            **params,
        )
        tokenizer = Tokenizer(model_path=tokenizer_path)
        model_args.vocab_size = tokenizer.n_words
        torch.set_default_tensor_type(torch.cuda.HalfTensor)
        model = Transformer(model_args)
        model.load_state_dict(checkpoint, strict=False)
        print(f"Loaded in {time.time() - start_time:.2f} seconds")
        return Llama(model, tokenizer)

    def generate(
        self,
        prompt_tokens: List[List[int]],
        max_gen_len: int,
        temperature: float = 0.6,
        top_p: float = 0.9,
        logprobs: bool = False,
        echo: bool = False,
    ):
        ...  # generation body not shown in this snippet
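As a usage sketch only (the paths are placeholders, and it assumes the class above is importable as in Meta's llama reference code), a loader like this is typically launched with torchrun:

# Hypothetical driver script; checkpoint and tokenizer paths are placeholders.
# Launch with: torchrun --nproc_per_node 1 load_model.py
generator = Llama.build(
    ckpt_dir="path/to/checkpoints/",
    tokenizer_path="path/to/tokenizer.model",
    max_seq_len=512,
    max_batch_size=4,
)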

ONCHAIN AI SUPPORTING MERCHANTS AND ADOPTION

Projects and merchants will be able to use large-scale AI models to build digital products, create NFT loyalty items, and elevate the user experience, with both coding tools and no-code, one-click options available.
CRYPTOBNK

The leading decentralized payment blockchain

Powering the largest network of crypto exchange stores and retail outlets — enabling transactions in Bitcoin, crypto, forex, RWA and NFTs in real life

#1      
The number 1 decentralized physical payment blockchain network

ONCHAIN

Bridging retailers from traditional payment to onchain payment

PHYSICAL STORES

Powering the world’s largest physical crypto exchange store

PAYMENTS

Pay using crypto

PURCHASES

Buy your first NFTs and RWAs in real life

EVERYTHING

Onchain, any chain, any wallet

Multi-chain & Cross-chain

Function X supports cross-chain transfers both internally (between f(x)Core and other Function X chains) and externally (to Ethereum and other blockchains). Through decentralized validators, assets can be moved safely between f(x)Core and other chains, with high transaction speed, low transaction cost, the ability to trade assets, and support for other useful scenarios.
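For illustration only, here is a minimal Python sketch of how a quorum of decentralized validators might attest to a transfer before it is released on the destination chain. The names (BridgeTransfer, QUORUM) are hypothetical and do not reflect Function X's actual bridge implementation.

# Hypothetical sketch of validator-attested cross-chain transfers;
# BridgeTransfer and QUORUM are illustrative names, not Function X APIs.
from dataclasses import dataclass, field

QUORUM = 2 / 3  # fraction of the validator set that must sign off

@dataclass
class BridgeTransfer:
    asset: str
    amount: float
    source_chain: str
    dest_chain: str
    attestations: set = field(default_factory=set)

    def attest(self, validator_id: str) -> None:
        self.attestations.add(validator_id)

    def is_releasable(self, validator_set: list) -> bool:
        # Release on the destination chain only once enough validators agree.
        signed = sum(1 for v in validator_set if v in self.attestations)
        return signed / len(validator_set) >= QUORUM

validators = ["val-1", "val-2", "val-3", "val-4"]
transfer = BridgeTransfer("FX", 100.0, "Ethereum", "f(x)Core")
for v in validators[:3]:
    transfer.attest(v)
print(transfer.is_releasable(validators))  # True: 3 of 4 validators attested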

f(x)Core

f(x)Core is the core network of the entire Function X ecosystem. It is the interlink of all assets and cross-chains.

Cross-chain bridge

It is the main bridge connecting Function X to Ethereum, Binance Smart Chain and other blockchains.

Multi-chain structure

Function X is deploying multi-chain frameworks to meet different needs and to increase the transactions per second (TPS) of the whole network.

PoS consensus model

Delegating $FX tokens in the PoS consensus enhances the security of the network while maintaining high throughput and performance.
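To make the security effect concrete, here is a toy calculation (illustrative figures only, not network data or Function X code) showing how additional delegation shrinks the share of bonded stake a hostile validator controls.

# Toy illustration: delegating more $FX to honest validators reduces the
# fraction of bonded stake an attacker holds. All values are placeholders.
honest_bonded = 50_000_000      # $FX bonded to honest validators
attacker_bonded = 20_000_000    # $FX bonded to a hostile validator
new_delegation = 30_000_000     # additional $FX delegated by token holders

def attacker_share(honest: float, attacker: float) -> float:
    return attacker / (honest + attacker)

print(f"before delegation: {attacker_share(honest_bonded, attacker_bonded):.1%}")                  # 28.6%
print(f"after delegation:  {attacker_share(honest_bonded + new_delegation, attacker_bonded):.1%}")  # 20.0%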

Deploy once across multiple chains - Function X's true omnichain feature

Deploy standard Solidity contracts once and your dApps will work natively across different chains. Omnichain on Function X allows DeFi dApps and NFT projects to work on Cosmos, Ethereum, L2s (Base, Arbitrum, etc.) and Solana as if deployed on each chain, including native MetaMask support - all through Function X omnichain.
[Diagram: f(x)Core connected to Bitcoin Layer 2, Avalanche, Solana, Ethereum, Base Chain, Cosmos, Arbitrum, Ethereum Layer 2 and BNB Chain]

ECOSYSTEM

Discover the hottest projects and partners on Function X
View All

FX Coin

$FX is the governance token of the Function X project. Users may use $FX to vote, as collateral (soon), to generate synthetic assets (soon), and to delegate, securing the core network, and more.

The $FX token has been available on Ethereum since 2019 and on Function X since 2021. The two are fully interoperable, and users can transfer $FX in and out of both blockchains while maintaining a fixed total supply and liquidity. More chains are scheduled for Q4 2021 and 2022.
How to delegate
[Live stats: total $FX delegated to the f(x)Core mainnet and the current delegation reward APR]
[Diagram: $FX moves between Function X (token generation) and Ethereum (financial system circulation) through lock/mint and burn/unlock operations, with cross-chain nodes maintaining a consistent total supply]
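A minimal sketch of the flow in the diagram above, assuming a simple two-ledger model (the supply figure and dictionary are placeholders, not real $FX data): locking or burning on one chain and minting or unlocking on the other leaves the combined supply unchanged.

# Hypothetical two-chain model of the lock/mint and burn/unlock flow;
# the supply figure is a placeholder, not the real $FX total supply.
TOTAL_FX = 100_000_000
circulating = {"Function X": TOTAL_FX, "Ethereum": 0}

def bridge(src: str, dst: str, amount: int) -> None:
    # Lock (or burn) on the source chain, mint (or unlock) on the destination;
    # cross-chain nodes keep the combined supply constant.
    assert circulating[src] >= amount, "insufficient balance on source chain"
    circulating[src] -= amount
    circulating[dst] += amount
    assert sum(circulating.values()) == TOTAL_FX

bridge("Function X", "Ethereum", 25_000_000)  # lock on f(x)Core, mint on Ethereum
bridge("Ethereum", "Function X", 10_000_000)  # burn on Ethereum, unlock on f(x)Core
print(circulating)  # {'Function X': 85000000, 'Ethereum': 15000000}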

Listed Exchanges

Team

Council
Sub-council for technology
Sub-council for marketing (FXDM)

David Ben Kay

President
As the President of Function X, David leads the foundation's efforts to create an ecosystem that is wholly powered by blockchain technology. Previously, he was a governing board member of the Ethereum Foundation, General Counsel of Microsoft China, and Managing Partner of Denton Hall, Beijing. David received his JD from UCLA.
Claudio Barros
Sub-Council Member
SCENE
Sub-Council Member
David Ben Kay
President
Yos Adiguna Ginting
Council Member
Indra Winarta
Ecosystem Lead
Zac Cheah
Sub-Council Member
FrenchXCore
Sub-Council Member
Rafal W (kenorb)
Sub-Council Member
Andreas Harpas
Sub-Council Member
Judie Liu
Sub-Council Member
Telchar
Sub-Council Member
Cryptowars
Sub-Council Member

PUNDI WALLET - The easiest way to access Web3

NON-CUSTODIAL
Complete control over your private keys and data. Your data and digital assets are accessible only to you.
EVM COMPATIBILITY
Interact with any EVM-compatible network and dApp.
MULTI-CHAIN
Hold and manage assets from multiple public chains. Equipped with a multi-address, multi-chain management dashboard that lets you switch between addresses and blockchains seamlessly.
Learn more
Subscribe to Function X News
Subscribe