Datasets:
ArXiv:
License:
| import os | |
| import json | |
| import time | |
| import argparse | |
| from functools import partial | |
| import tqdm | |
| import numpy as np | |
| import scipy | |
| import pandas as pd | |
| import torch | |
| from torch.utils.data import DataLoader, Subset | |
| import cupy as cp | |
| import pyscf | |
| import pyscf.df | |
| from gto import GTOBasis, element_to_atomic_number, GTOProductBasisHelper, build_irreps_from_mol, pyscf_to_standard_perm_D | |
| from dataset import SCFBenchDataset | |
| from nequip_model import nequip_simple_builder | |
def move_data_to_device(data, device):
    """Recursively transfer every tensor inside *data* to *device*.

    Dicts are traversed recursively; torch tensors are moved with ``.to``;
    any other leaf value is returned unchanged.
    """
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, dict):
        return {key: move_data_to_device(value, device) for key, value in data.items()}
    return data
def run_scf_and_count_steps(mol, xc, dm0=None, verbose=False, with_df=False, with_df_basis='def2-svp-jkfit'):
    """Run a GPU RKS SCF calculation and time it.

    Args:
        mol: PySCF molecule.
        xc: exchange-correlation functional name.
        dm0: optional initial density matrix guess.
        verbose: if True, set PySCF verbosity to 4, otherwise silence output.
        with_df: enable density fitting with basis *with_df_basis*.

    Returns:
        (total_energy, n_iterations, wall_seconds); n_iterations is -1 when
        the SCF failed to converge.
    """
    tic = time.time()
    mf = mol.RKS(xc=xc)
    if with_df:
        mf = mf.density_fit(auxbasis=with_df_basis)
    mf = mf.to_gpu()
    mf.verbose = 4 if verbose else 0
    mf.scf(dm0=dm0)
    n_iter = mf.cycles if mf.converged else -1
    return mf.e_tot, n_iter, time.time() - tic
def transfer_fock(mol, transfer_mol, fock):
    """Project a Fock matrix from the AO basis of *mol* onto that of *transfer_mol*.

    Builds the projector C = S1^{-1} S12 from the source overlap S1 and the
    cross overlap S12 between the two bases, and returns C^T F C.
    """
    cross_ovlp = pyscf.gto.mole.intor_cross('int1e_ovlp', mol, transfer_mol)
    src_ovlp = mol.intor('int1e_ovlp')
    projector = scipy.linalg.solve(src_ovlp, cross_ovlp)
    return projector.T.dot(fock).dot(projector)
| def dm_from_fock(mol, overlap, fock): | |
| # from QHNet | |
| overlap = torch.tensor(overlap) | |
| fock = torch.tensor(fock) | |
| eigvals, eigvecs = torch.linalg.eigh(overlap) | |
| eps = 1e-8 * torch.ones_like(eigvals) | |
| eigvals = torch.where(eigvals > 1e-8, eigvals, eps) | |
| frac_overlap = eigvecs / torch.sqrt(eigvals).unsqueeze(-2) | |
| Fs = torch.mm(torch.mm(frac_overlap.transpose(-1, -2), fock), frac_overlap) | |
| orbital_energies, orbital_coefficients = torch.linalg.eigh(Fs) | |
| orbital_coefficients = torch.mm(frac_overlap, orbital_coefficients) | |
| num_orb = mol.nelectron // 2 | |
| orbital_coefficients = orbital_coefficients.squeeze() | |
| dm0 = orbital_coefficients[:, :num_orb].matmul(orbital_coefficients[:, :num_orb].T) * 2 | |
| dm0 = dm0.cpu().numpy() | |
| return dm0 | |
def dm_from_auxdensity(mol, auxmol, xc, aux_vec, normalize_nelec, transfer_mol=None):
    """Build an initial density matrix from an auxiliary-basis density expansion.

    The auxiliary coefficient vector ``aux_vec`` (cupy array, in PySCF AO order
    of ``auxmol``) defines an electron density. From it, a Fock matrix is
    assembled (XC potential on a DFT grid + Coulomb term, plus exact exchange
    for hybrid functionals and the core Hamiltonian), which is then
    diagonalized once to produce occupied orbitals and a density matrix.

    Args:
        mol: PySCF molecule in the orbital AO basis.
        auxmol: PySCF auxiliary molecule carrying the density-fitting basis.
        xc: exchange-correlation functional name.
        aux_vec: auxiliary density coefficients (cupy array).
        normalize_nelec: if True, rescale ``aux_vec`` so its integrated density
            equals ``mol.nelectron`` before building the Fock matrix.
        transfer_mol: if given, the Fock matrix is projected into this
            molecule's AO basis before diagonalization.

    Returns:
        (dm0, extra_stats): numpy density matrix and a dict of timing /
        diagnostic statistics.
    """
    # Heavy GPU dependencies are imported lazily so that importing this module
    # does not require a CUDA environment.
    import numpy
    import cupy
    from pyscf import lib
    import gpu4pyscf.scf.jk
    from gpu4pyscf.df.int3c2e_bdiv import Int3c2eOpt
    from gpu4pyscf.lib.cupy_helper import contract
    from gpu4pyscf.dft.numint import NumInt, add_sparse, _tau_dot, _scale_ao, _GDFTOpt
    from gpu4pyscf.dft import Grids
    from gpu4pyscf.lib.cupy_helper import transpose_sum

    def nr_rks(xctype, mol, xc_code, auxmol, aux_vec):
        # Evaluate electron count, XC energy, and XC potential matrix from the
        # auxiliary density on a DFT grid. Mirrors gpu4pyscf's nr_rks, but the
        # density rho is synthesized from auxiliary basis functions instead of
        # a density matrix.
        # xctype = ni._xc_type(xc_code)
        if xctype == 'LDA':
            ao_deriv = 0
        else:
            # GGA/MGGA also need first derivatives of the AOs on the grid.
            ao_deriv = 1
        # Number of derivative components (1 for value only, 4 for value+gradient).
        comp = (ao_deriv+1)*(ao_deriv+2)*(ao_deriv+3)//6
        grids = Grids(mol).build()
        ngrids = grids.weights.size
        rho = cupy.zeros((comp, ngrids))
        aux_ni = NumInt().build(auxmol, grids)
        naux = aux_ni.gdftopt._sorted_mol.nao
        # Reorder the coefficient vector into gpu4pyscf's sorted AO layout.
        _sorted_aux_vec = aux_ni.gdftopt.sort_orbitals(aux_vec, axis=[0])
        p0 = p1 = 0
        # rho(g) = sum_i c_i * chi_i(g), accumulated grid-block by grid-block.
        for aux_ao_on_grids, idx, weight, _ in aux_ni.block_loop(aux_ni.gdftopt._sorted_mol, grids, naux, ao_deriv):
            p0, p1 = p1, p1 + weight.size
            rho[:,p0:p1] = (contract('xig,i->xg', aux_ao_on_grids, _sorted_aux_vec[idx]))
        del aux_ao_on_grids, _sorted_aux_vec
        # Fresh grids/NumInt for the orbital basis of `mol`.
        grids = Grids(mol).build()
        ni = NumInt().build(mol, grids)
        if xctype == 'MGGA':
            # Append a tau surrogate built from the density gradient
            # (von Weizsaecker-like |grad rho|^2 / (8 rho)); the auxiliary
            # density carries no orbital information to compute exact tau.
            rho = cupy.vstack([rho, (rho[1:4]**2).sum(axis=0)/rho[0]/8])
        weights = cupy.asarray(grids.weights)
        # Integrated electron number of the synthesized density.
        nelec = rho[0].dot(weights)
        exc, vxc = ni.eval_xc_eff(xc_code, rho, deriv=1, xctype=xctype)[:2]
        vxc = cupy.asarray(vxc, order='C')
        exc = cupy.asarray(exc, order='C')
        excsum = float(cupy.dot(rho[0]*weights, exc[:,0]))
        # Weighted potential; the .5 factors compensate for the later
        # symmetrization via transpose_sum (same convention as gpu4pyscf).
        wv = vxc
        wv *= weights
        if xctype == 'GGA':
            wv[0] *= .5
        if xctype == 'MGGA':
            wv[[0,4]] *= .5
        opt = ni.gdftopt
        _sorted_mol = opt._sorted_mol
        nao = _sorted_mol.nao
        buf = None
        # Reusable scratch buffer for per-block sub-matrices.
        vtmp_buf = cupy.empty(nao*nao)
        vmat = cupy.zeros((nao, nao))
        p0 = p1 = 0
        for ao_mask, idx, weight, _ in ni.block_loop(
                _sorted_mol, grids, nao, ao_deriv, max_memory=None):
            p0, p1 = p1, p1 + weight.size
            nao_sub = len(idx)
            vtmp = cupy.ndarray((nao_sub, nao_sub), memptr=vtmp_buf.data)
            if xctype == 'LDA':
                aow = _scale_ao(ao_mask, wv[0,p0:p1], out=buf)
                add_sparse(vmat, ao_mask.dot(aow.T, out=vtmp), idx)
            elif xctype == 'GGA':
                aow = _scale_ao(ao_mask, wv[:,p0:p1], out=buf)
                add_sparse(vmat, ao_mask[0].dot(aow.T, out=vtmp), idx)
            elif xctype == 'MGGA':
                # Kinetic-density (tau) contribution plus the value/gradient part.
                vtmp = _tau_dot(ao_mask, ao_mask, wv[4,p0:p1], buf=buf, out=vtmp)
                aow = _scale_ao(ao_mask, wv[:4,p0:p1], out=buf)
                vtmp = contract('ig,jg->ij', ao_mask[0], aow, beta=1., out=vtmp)
                add_sparse(vmat, vtmp, idx)
        # Back to PySCF AO ordering; symmetrize for the non-LDA cases that
        # only accumulated one triangle's worth of factors above.
        vmat = opt.unsort_orbitals(vmat, axis=[0,1])
        if xctype != 'LDA':
            transpose_sum(vmat)
        return nelec, excsum, vmat

    def get_j(mol, auxmol, aux_vec):
        # Coulomb matrix J_mn = (mn|P) c_P directly from the auxiliary
        # coefficients, via blocked 3-center integrals.
        int3c2e_opt = Int3c2eOpt(mol, auxmol).build()
        aux_vec_sorted = cupy.asarray(int3c2e_opt.aux_coeff).dot(cupy.asarray(aux_vec))
        p1 = 0
        J_compressed = 0
        for eri3c in int3c2e_opt.int3c2e_bdiv_generator(batch_size=1200):
            p0, p1 = p1, p1 + eri3c.shape[1]
            J_compressed += eri3c.dot(aux_vec_sorted[p0:p1])
        nao = int3c2e_opt.sorted_mol.nao
        J = cupy.zeros((nao, nao))
        # Scatter the compressed (unique-pair) values into a full symmetric matrix.
        ao_pair_mapping = int3c2e_opt.create_ao_pair_mapping()
        rows, cols = divmod(cupy.asarray(ao_pair_mapping), nao)
        J[rows, cols] = J[cols, rows] = J_compressed
        c = cupy.asarray(int3c2e_opt.coeff)
        return c.T.dot(J).dot(c)

    def get_jk(mol, dm, auxmol, aux_vec):
        # J from the auxiliary density; K must come from an actual density
        # matrix (here the SCF initial guess), since aux_vec has no orbital info.
        vj = get_j(mol, auxmol, aux_vec)
        _, vk = gpu4pyscf.scf.jk.get_jk(mol, dm, 1, with_j=False)
        return vj, vk

    def get_veff(ks, auxmol, aux_vec, dm, mol=None, hermi=1):
        # Effective potential (XC + Coulomb [+ scaled exchange for hybrids])
        # analogous to RKS.get_veff, but driven by the auxiliary density.
        time_stats = {}
        if mol is None: mol = ks.mol
        ni = ks._numint
        if hermi == 2: # because rho = 0
            n, exc, vxc = 0, 0, 0
        else:
            # no memory check
            # max_memory = ks.max_memory - lib.current_memory()[0]
            t_start = time.time()
            n, exc, vxc = nr_rks(ni._xc_type(ks.xc), mol, ks.xc, auxmol, aux_vec)
            time_stats['grid_eval_time'] = time.time() - t_start
        if ks.do_nlc():
            # Non-local correlation (e.g. VV10) is not supported here.
            raise NotImplementedError
        if not ni.libxc.is_hybrid_xc(ks.xc):
            t_start = time.time()
            vj = get_j(mol, auxmol, aux_vec)
            time_stats['get_j_time'] = time.time() - t_start
            vxc += vj
        else:
            omega, alpha, hyb = ni.rsh_and_hybrid_coeff(ks.xc, spin=mol.spin)
            if omega == 0:
                vj, vk = get_jk(mol, dm, auxmol, aux_vec)
                vk *= hyb
            else:
                # Range-separated hybrids are not supported.
                raise NotImplementedError
            vxc += vj - vk * .5
        return vxc, time_stats

    def int1e1c_analytical(mol):
        """
        Calculate integral values
        .. math:: \int_{-\infty}^{\infty} phi(r) dr

        Only s-type shells (angular momentum 0) have a nonzero integral over
        all space; higher shells contribute zeros (one per spherical component).
        """
        integral_val = []
        # mol._bas[:, 1] holds each shell's angular momentum.
        for idx, aug_mom in enumerate(mol._bas[:, 1]):
            if aug_mom == 0:
                exponents = mol.bas_exp(idx)
                # Gaussian normalization factor for an s primitive.
                norm = (2 * exponents / numpy.pi)**0.75
                coeffs = mol.bas_ctr_coeff(idx)[:, 0]
                integral_val.append(
                    (coeffs * norm * (numpy.pi / exponents)**1.5).sum())
            else:
                integral_val += [0] * (2*aug_mom+1)
        integral_val = numpy.array(integral_val)
        return integral_val

    extra_stats = {}
    if normalize_nelec:
        # Rescale the auxiliary coefficients so the density integrates to the
        # exact electron count; record the pre-normalization value.
        aux_1c1e = cp.array(int1e1c_analytical(auxmol))
        aux_vec_nelec = aux_vec @ aux_1c1e
        aux_vec *= mol.nelectron / aux_vec_nelec
        extra_stats['aux_vec_nelec'] = aux_vec_nelec.get().item()
    mf = mol.RKS(xc=xc).to_gpu()
    dm_guess = 0
    if mf._numint.libxc.is_hybrid_xc(mf.xc):
        # Hybrids need a density matrix for the exchange part; use the
        # standard SCF initial guess.
        dm_guess = mf.get_init_guess()
    fock, veff_time_stats = get_veff(mf, auxmol, aux_vec, dm_guess)
    fock = fock + mf.get_hcore()
    extra_stats.update(veff_time_stats)
    if transfer_mol is not None:
        # Project the Fock matrix into the target basis and diagonalize there.
        fock = cupy.array(transfer_fock(mol, transfer_mol, fock.get()))
        mol = transfer_mol
    t_start = time.time()
    s1e = mol.intor('int1e_ovlp')
    # Generalized eigensolve F C = S C e on GPU.
    mo_energy, mo_coeff = gpu4pyscf.lib.cupy_helper.eigh(cupy.array(fock), cupy.array(s1e))
    extra_stats['eigh_time'] = time.time() - t_start
    nocc = mol.nelectron // 2
    mocc = mo_coeff[:,:nocc]
    # Closed-shell density matrix from the doubly occupied orbitals.
    dm0 = mocc.dot(mocc.conj().T) * 2
    dm0_numpy = dm0.get()
    return dm0_numpy, extra_stats
def build_mol_from_data(type_names, aobasis_name, data):
    """Construct a PySCF molecule from a dataset sample.

    ``data['z']`` holds type indices into *type_names*; ``data['pos']`` holds
    Cartesian coordinates. Both are torch tensors and are moved to CPU first.
    """
    charges = [element_to_atomic_number[type_names[type_idx]] for type_idx in data['z'].cpu().numpy()]
    positions = data['pos'].cpu().numpy()
    return pyscf.M(atom=list(zip(charges, positions)), basis=aobasis_name)
def pred_auxdensity_postprocess(mol, outputs, auxbasis_name, auxbasis, xc, use_denfit_ovlp, normalize_nelec, transfer_mol=None):
    """Turn per-element auxiliary-density predictions into an initial density matrix.

    Gathers the model's per-atom coefficient vectors in the molecule's atom
    order, permutes them into PySCF AO ordering, optionally solves against the
    auxiliary overlap (density-fitting-by-overlap convention), and delegates to
    :func:`dm_from_auxdensity`. ``auxbasis`` is accepted for interface
    compatibility. Returns ``(dm, time_stats)``.
    """
    t0 = time.time()
    preds = outputs['output:auxdensity']
    # 'etb:<beta>' selects an even-tempered auxiliary basis; anything else is a
    # named basis set.
    if auxbasis_name.startswith('etb:'):
        beta = float(auxbasis_name.split(':')[-1])
        etb_basis = pyscf.df.aug_etb(mol, beta)
        auxmol = pyscf.df.make_auxmol(mol, auxbasis=etb_basis)
    else:
        auxmol = pyscf.df.make_auxmol(mol, auxbasis=auxbasis_name)
    # Predictions are grouped by element symbol; walk the atoms in order and
    # pick each element's next unused vector.
    consumed = {element: 0 for element in preds.keys()}
    per_atom_vectors = []
    for atom_idx in range(mol.natm):
        element = mol.atom_pure_symbol(atom_idx)
        per_atom_vectors.append(preds[element][consumed[element]])
        consumed[element] += 1
    aux_vec = torch.cat(per_atom_vectors)
    # Permute from the model's standardized ordering to PySCF AO ordering.
    mol_irreps = build_irreps_from_mol(auxmol)
    aux_vec = pyscf_to_standard_perm_D(mol_irreps).T.to(aux_vec) @ aux_vec
    aux_vec = aux_vec.cpu().numpy()
    if use_denfit_ovlp:
        aux_vec = np.linalg.solve(auxmol.intor('int1e_ovlp'), aux_vec)
    t1 = time.time()
    # dm = dm_from_auxdensity(mol, auxmol, xc, aux_vec, normalize_nelec, transfer_mol)
    dm, time_stats = dm_from_auxdensity(mol, auxmol, xc, cp.array(aux_vec), normalize_nelec, transfer_mol)
    time_stats['model_to_auxvec_time'] = t1 - t0
    return dm, time_stats
def pred_fock_postprocess(mol, outputs, ao_prod_basis, normalize_nelec, transfer_mol=None):
    """Assemble the model-predicted Fock matrix and diagonalize it into a density matrix.

    The predicted diagonal/lower-triangular blocks are assembled into a full
    matrix, converted to PySCF AO ordering, optionally projected into the
    basis of *transfer_mol*, and fed to :func:`dm_from_fock`. If
    *normalize_nelec* is set, the density matrix is rescaled so that
    tr(D S) equals the electron count. Returns ``(dm, {})``.
    """
    fock = ao_prod_basis.assemble_matrix_from_padded_blocks(mol.atom_charges(), outputs['output:fock_diag_blocks'].cpu().numpy(), outputs['output:fock_tril_blocks'].cpu().numpy())
    fock = ao_prod_basis.transform_from_std_to_pyscf(mol.atom_charges(), fock)
    if transfer_mol is None:
        overlap = mol.intor('int1e_ovlp').astype(np.float32)
        dm = dm_from_fock(mol, overlap, fock)
    else:
        overlap = transfer_mol.intor('int1e_ovlp')
        # BUG FIX: the original wrote `transfer_fock = transfer_fock(...)`,
        # which makes `transfer_fock` a local variable and raises
        # UnboundLocalError before the module-level function can be called.
        transferred_fock = transfer_fock(mol, transfer_mol, fock)
        dm = dm_from_fock(transfer_mol, overlap, transferred_fock)
    if normalize_nelec:
        # tr(D S) is the electron count implied by dm; rescale to the exact one.
        dm *= mol.nelectron / (dm * overlap).sum()
    return dm, {}
def pred_dm_postprocess(mol, outputs, ao_prod_basis, normalize_nelec, transfer_mol=None):
    """Assemble the model-predicted density matrix in PySCF AO ordering.

    Optionally rescales the matrix so tr(D S) matches the electron count.
    Basis transfer is not implemented for direct density-matrix predictions.
    Returns ``(dm, {})``.
    """
    diag_blocks = outputs['output:dm_diag_blocks'].cpu().numpy()
    tril_blocks = outputs['output:dm_tril_blocks'].cpu().numpy()
    dm = ao_prod_basis.assemble_matrix_from_padded_blocks(mol.atom_charges(), diag_blocks, tril_blocks)
    dm = ao_prod_basis.transform_from_std_to_pyscf(mol.atom_charges(), dm)
    if normalize_nelec:
        overlap = mol.intor('int1e_ovlp').astype(np.float32)
        dm *= mol.nelectron / (dm * overlap).sum()
    if transfer_mol is not None:
        raise NotImplementedError
    return dm, {}
def main():
    """CLI entry point: benchmark SCF acceleration from predicted initial guesses.

    For every molecule in the selected dataset shard, builds an initial
    density matrix from the model output (or from ground-truth labels in
    ``--gt-mode``), runs SCF twice (default guess vs. predicted guess), and
    writes per-molecule step/time/energy statistics to a CSV file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--ckpt', type=str, help='Path to the checkpoint file')
    parser.add_argument('--gt-mode', action='store_true', help='Use ground truth as initial guess.')
    parser.add_argument('--data-root', default='./dataset/ood-test', type=str, help='Path to the data root directory')
    parser.add_argument('--model-type', default='auxdensity', type=str, choices=['auxdensity', 'dm', 'fock'], help='Prediction type of the model')
    parser.add_argument('--xc', default='PBE', type=str, help='XC functional')
    parser.add_argument('--transfer-basis', default=None, type=str, help='Basis set to which the model prediction is transferred to.')
    parser.add_argument('--with-df', action='store_true', help='Use density fitting for SCF.')
    parser.add_argument('--with-df-basis', default='def2-svp-jkfit', help='The basis set for density fitting in SCF.')
    parser.add_argument('--output', required=True, type=str, help='Path to the output CSV file')
    parser.add_argument('--num-shards', type=int, default=1, help='Number of shards')
    parser.add_argument('--shard-index', default=0, type=int, help='Shard index')
    parser.add_argument('--scf-verbose', action='store_true', help='Print SCF verbose information')
    parser.add_argument('--split', default='no', type=str, choices=['train', 'val', 'test', 'no'], help='Split of the dataset')
    parser.add_argument('--normalize-nelec', action='store_true', help='Normalize the model prediction by number of electrons.')
    args = parser.parse_args()
    if args.ckpt is not None:
        # if a ckpt is provided, use the data config in the ckpt
        ckpt = torch.load(args.ckpt, map_location='cpu', weights_only=True)
        config = ckpt['config']
        data_config = config['data']
        if not args.gt_mode:
            model = nequip_simple_builder(**config['model'])
            model.load_state_dict(ckpt['state_dict'])
            model = model.eval().cuda()
    else:
        # Without a checkpoint, only ground-truth mode makes sense; use a
        # fixed default data configuration.
        assert args.gt_mode, 'Checkpoint path is required in non-gt mode.'
        gt_mode_part = 'auxdensity.denfit' if args.model_type == 'auxdensity' else args.model_type
        data_config = {
            'r_max': 5.0,
            'type_names': ['H', 'C', 'N', 'O', 'F', 'P', 'S'],
            'remove_self_loops': True,
            'parts_to_load': ['base', gt_mode_part],
            'aobasis': 'def2-svp',
            'auxbasis': 'def2-universal-jfit',
            'use_denfit_ovlp': False,
        }
    dataset = SCFBenchDataset(data_root=args.data_root, **data_config)
    if args.split != 'no':
        print(f'Assuming the dataset is SCFbench-main and using indices from scfbench_main_split.npz for split {args.split}...')
        split_file = np.load('./scfbench_main_split.npz')
        dataset_indices = split_file[args.split].tolist()
    else:
        dataset_indices = list(range(len(dataset)))
    # Round-robin sharding so multiple processes can divide the workload.
    dataset_subset_indices = [j for i, j in enumerate(dataset_indices) if i % args.num_shards == args.shard_index]
    dataloader = DataLoader(
        Subset(dataset, dataset_subset_indices),
        batch_size=1,
        shuffle=False,
        num_workers=2,
        collate_fn=dataset.collater,
    )
    aobasis_name, auxbasis_name = data_config['aobasis'], data_config['auxbasis']
    aobasis = GTOBasis.from_basis_name(aobasis_name, elements=data_config['type_names'])
    auxbasis = GTOBasis.from_basis_name(auxbasis_name, elements=data_config['type_names'])
    ao_prod_basis = GTOProductBasisHelper(aobasis)
    use_denfit_ovlp = data_config.get('use_denfit_ovlp', False)
    records = []
    for data in tqdm.tqdm(dataloader):
        data = move_data_to_device(data, 'cuda')
        mol = build_mol_from_data(data_config['type_names'], aobasis_name, data)
        transfer_mol = build_mol_from_data(data_config['type_names'], args.transfer_basis, data) if args.transfer_basis else None
        if not args.gt_mode:
            # Re-run inference until a single pass takes <= 1 s so the reported
            # model_time excludes warm-up/compilation overhead.
            model_time = 1000.0
            while model_time > 1:
                t0 = time.time()
                with torch.no_grad():
                    outputs = model(data)
                t1 = time.time()
                model_time = t1 - t0
        else:
            # Ground-truth mode: take the labels straight from the dataset.
            if args.model_type == 'auxdensity':
                outputs = {'output:auxdensity': data['auxdensity']}
            elif args.model_type == 'fock':
                outputs = {'output:fock_diag_blocks': data['fock_diag_blocks'], 'output:fock_tril_blocks': data['fock_tril_blocks']}
            elif args.model_type == 'dm':
                outputs = {'output:dm_diag_blocks': data['dm_diag_blocks'], 'output:dm_tril_blocks': data['dm_tril_blocks']}
            model_time = 0.0
        t2 = time.time()
        # Convert raw model outputs into an initial density matrix.
        if args.model_type == 'auxdensity':
            dm, extra_stats = pred_auxdensity_postprocess(mol, outputs, auxbasis_name, auxbasis, args.xc, use_denfit_ovlp, args.normalize_nelec, transfer_mol=transfer_mol)
        elif args.model_type == 'fock':
            dm, extra_stats = pred_fock_postprocess(mol, outputs, ao_prod_basis, args.normalize_nelec, transfer_mol=transfer_mol)
        elif args.model_type == 'dm':
            dm, extra_stats = pred_dm_postprocess(mol, outputs, ao_prod_basis, args.normalize_nelec, transfer_mol=transfer_mol)
        t3 = time.time()
        eval_scf = partial(run_scf_and_count_steps, xc=args.xc, verbose=args.scf_verbose, with_df=args.with_df, with_df_basis=args.with_df_basis)
        # Baseline SCF (default guess) vs. accelerated SCF (predicted guess).
        if transfer_mol is not None:
            original_energy, original_steps, original_time = eval_scf(transfer_mol, dm0=None)
            accelerated_energy, accelerated_steps, accelerated_time = eval_scf(transfer_mol, dm0=cp.array(dm.astype(np.float64))) # the type conversion is important here
        else:
            original_energy, original_steps, original_time = eval_scf(mol, dm0=None)
            accelerated_energy, accelerated_steps, accelerated_time = eval_scf(mol, dm0=cp.array(dm.astype(np.float64))) # the type conversion is important here
        record = {
            'natom': mol.natm,
            'nelec': mol.nelectron,
            'original_steps': original_steps,
            'accelerated_steps': accelerated_steps,
            'original_time': original_time,
            'accelerated_time': accelerated_time,
            'original_energy': original_energy,
            'accelerated_energy': accelerated_energy,
            'model_time': model_time,
            'conversion_time': t3 - t2,
        }
        record.update(extra_stats)
        print(record)
        records.append(record)
        del data, mol, dm, outputs
        # Periodically flush partial results so long runs can be inspected early.
        if len(records) % 10 == 0:
            df = pd.DataFrame.from_records(records)
            df.to_csv(args.output, index=False)
    df = pd.DataFrame.from_records(records)
    df.to_csv(args.output, index=False)


if __name__ == '__main__':
    main()