library(tidyverse)
library(arrow)
library(here)

# these are the protein coding non dubious loci
mahendrawada_features <- arrow::read_parquet(
  "~/code/hf/mahendrawada_2025/features_mahendrawada_2025.parquet"
)

#' Deduplicate to one row per (sample_id, target_locus_tag), keeping the row
#' with the largest absolute effect. Computation is pushed to duckdb via
#' dbplyr before collecting.
#'
#' @param ds an arrow Dataset/Table (or tibble) with sample_id and
#'   target_locus_tag columns
#' @param effect_col unquoted name of the effect column to rank on
#' @return a collected tibble with one row per (sample_id, target_locus_tag)
dedup_max_abs_effect <- function(ds, effect_col) {
  ds %>%
    arrow::to_duckdb() %>%
    group_by(sample_id, target_locus_tag) %>%
    mutate(rn = row_number(desc(abs({{ effect_col }})))) %>%
    filter(rn == 1) %>%
    select(-rn) %>%
    ungroup() %>%
    collect()
}

# read in and prepare the perturbation response data
perturbation_response_data <- list(
  mahendrawada_rnaseq = arrow::read_parquet(
    "~/code/hf/mahendrawada_2025/rnaseq_reprocessed.parquet"
  ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag) %>%
    replace_na(list(log2FoldChange = 0, pvalue = 1)) %>%
    mutate(abs_log2fc = abs(log2FoldChange)),
  # kemmeren requires deduplicating instances where there are multiple probes
  # to the same locus_tag. Take the max
  kemmeren = arrow::open_dataset(
    "~/code/hf/kemmeren_2014/kemmeren_2014.parquet"
  ) %>%
    filter(
      target_locus_tag %in% mahendrawada_features$locus_tag,
      str_detect(regulator_locus_tag, "WT-", negate = TRUE)
    ) %>%
    select(sample_id, regulator_locus_tag, target_locus_tag, Madj, pval) %>%
    dedup_max_abs_effect(Madj),
  hackett = arrow::read_parquet(
    "~/code/hf/hackett_2020/hackett_2020.parquet"
  ) %>%
    filter(
      target_locus_tag %in% mahendrawada_features$locus_tag,
      str_detect(regulator_locus_tag, "WT-", negate = TRUE)
    ) %>%
    select(
      sample_id, regulator_locus_tag, target_locus_tag,
      log2_shrunken_timecourses
    ) %>%
    dedup_max_abs_effect(log2_shrunken_timecourses) %>%
    # add this for consistency with the other datasets
    mutate(pvalue = 0),
  hu_reimand = arrow::read_parquet(
    "~/code/hf/hu_2007_reimand_2010/hu_2007_reimand_2010.parquet"
  ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag) %>%
    select(sample_id, regulator_locus_tag, target_locus_tag, effect, pval) %>%
    dedup_max_abs_effect(effect),
  hughes_ko = arrow::read_parquet(
    "~/code/hf/hughes_2006/knockout.parquet"
  ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag) %>%
    select(
      sample_id, regulator_locus_tag, target_locus_tag, mean_norm_log2fc
    ) %>%
    dedup_max_abs_effect(mean_norm_log2fc) %>%
    # add this for consistency with the other datasets
    mutate(pvalue = 0),
  hughes_oe = arrow::read_parquet(
    "~/code/hf/hughes_2006/overexpression.parquet"
  ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag) %>%
    select(
      sample_id, regulator_locus_tag, target_locus_tag, mean_norm_log2fc
    ) %>%
    dedup_max_abs_effect(mean_norm_log2fc) %>%
    # add this for consistency with the other datasets
    mutate(pvalue = 0)
)

# calling cards data: composite (pooled) samples and single samples are read
# separately and combined below in `binding_data`
composite_cc <- arrow::open_dataset(
  "~/code/hf/callingcards/annotated_features_combined"
) %>%
  collect() %>%
  left_join(
    arrow::read_parquet(
      "~/code/hf/callingcards/annotated_features_combined_meta.parquet"
    )
  ) %>%
  dplyr::rename(id = genome_map_id_set)

single_cc_meta <- arrow::read_parquet(
  "~/code/hf/callingcards/annotated_features_meta.parquet"
) %>%
  filter(batch != "composite")

single_cc <- arrow::open_dataset("~/code/hf/callingcards/annotated_features") %>%
  filter(id %in% single_cc_meta$id) %>%
  collect() %>%
  left_join(single_cc_meta) %>%
  mutate(id = as.character(id))

# note: filter these for the mahendrawada features, too. Restricts analysis
# to only non dubious genomic loci
binding_data <- list(
  cc = single_cc %>%
    select(all_of(intersect(colnames(.), colnames(composite_cc)))) %>%
    bind_rows(
      composite_cc %>%
        select(all_of(intersect(colnames(.), colnames(single_cc))))
    ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag),
  harbison = arrow::read_parquet(
    "~/code/hf/harbison_2004/harbison_2004.parquet"
  ) %>%
    replace_na(list(effect = 0, pvalue = 1)) %>%
    group_by(sample_id, target_locus_tag) %>%
    slice_max(abs(effect), n = 1, with_ties = FALSE) %>%
    ungroup() %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag),
  chipexo = arrow::read_parquet(
    "~/code/hf/rossi_2021/rossi_2021_af_combined.parquet"
  ) %>%
    left_join(
      arrow::read_parquet(
        "~/code/hf/rossi_2021/rossi_2021_metadata_sample.parquet"
      )
    ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag),
  mahendrawada_chec = arrow::read_parquet(
    "~/code/hf/mahendrawada_2025/chec_mahendrawada_m2025_af_combined.parquet"
  ) %>%
    left_join(
      arrow::read_parquet(
        "~/code/hf/mahendrawada_2025/chec_mahendrawada_m2025_af_combined_meta.parquet"
      )
    ) %>%
    filter(target_locus_tag %in% mahendrawada_features$locus_tag)
)

#' Filter one binding dataset to significant, non-self rows shared with the
#' perturbation-response (PR) data and rank targets within each sample.
#'
#' @param binding_df binding data with sample_id / regulator_locus_tag /
#'   target_locus_tag columns
#' @param pr_standardized PR data already standardized to effect/pvalue columns
#' @param pval_col unquoted significance column (e.g. poisson_pval)
#' @param threshold significance cutoff applied to `pval_col` (note: for the
#'   log-scale p-value columns the caller passes log(0.1))
#' @param order_col unquoted enrichment/effect column used to order rows
#' @return a tibble grouped by sample_id with a pvalue_rank column
rank_binding_targets <- function(binding_df, pr_standardized, pval_col,
                                 threshold, order_col) {
  binding_df %>%
    # remove the target observation for the perturbed locus
    filter(regulator_locus_tag != target_locus_tag) %>%
    filter({{ pval_col }} <= threshold) %>%
    # restrict to regulators/targets present in the PR data
    filter(
      regulator_locus_tag %in% unique(pr_standardized$regulator_locus_tag),
      target_locus_tag %in% unique(pr_standardized$target_locus_tag)
    ) %>%
    group_by(sample_id) %>%
    arrange(desc({{ order_col }})) %>%
    mutate(pvalue_rank = rank({{ pval_col }}, ties.method = "min"))
}

#' Filter the PR data to significant rows shared with one binding dataset and
#' rank targets within each sample by |effect| and by pvalue.
rank_pr_targets <- function(pr_standardized, binding_raw) {
  pr_standardized %>%
    filter(pvalue <= 0.1) %>%
    filter(
      regulator_locus_tag %in% unique(binding_raw$regulator_locus_tag),
      target_locus_tag %in% unique(binding_raw$target_locus_tag)
    ) %>%
    group_by(sample_id) %>%
    mutate(
      abs_effect_rank = rank(-abs(effect), ties.method = "min"),
      pvalue_rank = rank(pvalue, ties.method = "min")
    )
}

#' Background target set: all PR targets whose regulator/target appear in the
#' given binding dataset (no significance filter, self-targets NOT removed —
#' see the note in create_pr_dto).
pr_background_targets <- function(pr_standardized, binding_raw) {
  pr_standardized %>%
    filter(
      regulator_locus_tag %in% unique(binding_raw$regulator_locus_tag),
      target_locus_tag %in% unique(binding_raw$target_locus_tag)
    ) %>%
    pull(target_locus_tag) %>%
    unique()
}

#' Create a DTO (presumably dual-threshold-optimization — TODO confirm) input
#' set for a given PR dataset: for each binding dataset, ranked binding
#' targets, ranked PR targets, and a background target list.
#'
#' @param pr_data one element of perturbation_response_data
#' @param pr_effect_col name (string) of the effect column in pr_data
#' @param pr_pval_col name (string) of the p-value column in pr_data
#' @param binding_data_list the binding_data list (cc, harbison, chipexo,
#'   mahendrawada_chec)
#' @return a named list (one entry per binding dataset) of
#'   list(binding, pr, background)
create_pr_dto <- function(pr_data, pr_effect_col, pr_pval_col,
                          binding_data_list) {
  # Standardize column names for the PR data
  pr_standardized <- pr_data %>%
    ungroup() %>%
    # remove the target observation for the perturbed locus
    # NOTE: this is also done for the binding data, though i don't
    # remove it from the background
    filter(regulator_locus_tag != target_locus_tag)

  # Handle effect column renaming
  if (pr_effect_col != "effect") {
    pr_standardized <- pr_standardized %>%
      rename(effect = !!sym(pr_effect_col))
  }

  # Handle pvalue column renaming
  if (pr_pval_col != "pvalue") {
    # If renaming a different column to pvalue, drop existing pvalue column
    # first. NOTE(review): rows where the new pvalue column (e.g. padj) is NA
    # are silently dropped later by filter(pvalue <= 0.1) — confirm intended.
    if ("pvalue" %in% colnames(pr_standardized)) {
      pr_standardized <- pr_standardized %>% select(-pvalue)
    }
    pr_standardized <- pr_standardized %>%
      rename(pvalue = !!sym(pr_pval_col))
  }

  # Dataset-specific significance/enrichment columns; cc uses `id` as its
  # sample identifier, so rename it up front for a uniform interface
  binding_ranked <- list(
    cc = binding_data_list$cc %>%
      dplyr::rename(sample_id = id) %>%
      rank_binding_targets(
        pr_standardized, poisson_pval, 0.1, callingcards_enrichment
      ),
    harbison = binding_data_list$harbison %>%
      rank_binding_targets(pr_standardized, pvalue, 0.1, effect),
    chipexo = binding_data_list$chipexo %>%
      rank_binding_targets(
        pr_standardized, log_poisson_pval, log(0.1), enrichment
      ),
    mahendrawada_chec = binding_data_list$mahendrawada_chec %>%
      rank_binding_targets(
        pr_standardized, log_poisson_pval, log(0.1), enrichment
      )
  )

  # For each binding dataset, pair the ranked binding data with the PR data
  # restricted to that binding dataset's regulators/targets
  imap(binding_ranked, function(ranked, binding_name) {
    binding_raw <- binding_data_list[[binding_name]]
    list(
      binding = ranked,
      pr = rank_pr_targets(pr_standardized, binding_raw),
      background = pr_background_targets(pr_standardized, binding_raw)
    )
  })
}

# Create all DTOs
all_pr_dtos <- list(
  mahendrawada_rnaseq = create_pr_dto(
    perturbation_response_data$mahendrawada_rnaseq,
    pr_effect_col = "log2FoldChange",
    pr_pval_col = "padj",
    binding_data_list = binding_data
  ),
  kemmeren = create_pr_dto(
    perturbation_response_data$kemmeren,
    pr_effect_col = "Madj",
    pr_pval_col = "pval",
    binding_data_list = binding_data
  ),
  hackett = create_pr_dto(
    perturbation_response_data$hackett,
    pr_effect_col = "log2_shrunken_timecourses",
    pr_pval_col = "pvalue",
    binding_data_list = binding_data
  ),
  hu_reimand = create_pr_dto(
    perturbation_response_data$hu_reimand,
    pr_effect_col = "effect",
    pr_pval_col = "pval",
    binding_data_list = binding_data
  ),
  hughes_ko = create_pr_dto(
    perturbation_response_data$hughes_ko,
    pr_effect_col = "mean_norm_log2fc",
    pr_pval_col = "pvalue",
    binding_data_list = binding_data
  ),
  hughes_oe = create_pr_dto(
    perturbation_response_data$hughes_oe,
    pr_effect_col = "mean_norm_log2fc",
    pr_pval_col = "pvalue",
    binding_data_list = binding_data
  )
)

#' Write out DTO ranked lists for one (PR dataset, binding dataset) pair.
#'
#' Produces headerless two-column (target, rank) csv files, one per sample,
#' under <base_outdir>/<pr_dataset>/<binding_set>/{binding,pr/effect,pr/pvalue},
#' plus a one-column background.csv.
write_out_pr_dto_lists <- function(pr_dataset_name, binding_pr_set_name,
                                   all_pr_dtos_list,
                                   base_outdir = here("results/dto")) {
  output_path <- file.path(base_outdir, pr_dataset_name)
  binding_pr_set <- all_pr_dtos_list[[pr_dataset_name]][[binding_pr_set_name]]

  # split the grouped tibbles into per-sample lists named by sample_id
  binding_split <- binding_pr_set$binding %>% group_split()
  names(binding_split) <- pull(group_keys(binding_pr_set$binding), sample_id)
  pr_split <- binding_pr_set$pr %>% group_split()
  names(pr_split) <- pull(group_keys(binding_pr_set$pr), sample_id)

  curr_output_path <- list(
    binding = file.path(output_path, binding_pr_set_name, "binding"),
    pr_effect = file.path(output_path, binding_pr_set_name, "pr", "effect"),
    pr_pvalue = file.path(output_path, binding_pr_set_name, "pr", "pvalue")
  )
  walk(curr_output_path, dir.create, recursive = TRUE, showWarnings = FALSE)

  # helper: write each per-sample tibble as <sample_id>.csv sorted by rank
  write_ranked <- function(split_list, rank_col, outdir) {
    walk(names(split_list), function(sample_name) {
      split_list[[sample_name]] %>%
        select(target_locus_tag, {{ rank_col }}) %>%
        arrange({{ rank_col }}) %>%
        write_csv(
          file.path(outdir, paste0(sample_name, ".csv")),
          col_names = FALSE
        )
    })
  }

  # Write out binding lists
  write_ranked(binding_split, pvalue_rank, curr_output_path$binding)
  # Write out effect-ranked pr lists
  write_ranked(pr_split, abs_effect_rank, curr_output_path$pr_effect)
  # Write out pvalue pr lists
  write_ranked(pr_split, pvalue_rank, curr_output_path$pr_pvalue)

  # Write out background
  tibble(target_locus_tag = binding_pr_set$background) %>%
    write_csv(
      file.path(output_path, binding_pr_set_name, "background.csv"),
      col_names = FALSE
    )
}

#' Generalized function to create lookups pairing binding sample files with
#' PR sample files for the same regulator.
#'
#' @param scratch_path prefix used in the emitted file paths (the compute
#'   environment where the csv lists will live)
#' @return list(lookup = complete binding/pr file-path pairs,
#'   incomplete_after_filtering = regulators missing one side or the other)
create_pr_lookups <- function(pr_dataset_name, binding_pr_set_name,
                              all_pr_dtos_list,
                              scratch_path = "/scratch/mblab/chasem/dto") {
  # Get binding and PR sample IDs
  binding_samples <-
    all_pr_dtos_list[[pr_dataset_name]][[binding_pr_set_name]]$binding %>%
    ungroup() %>%
    dplyr::select(sample_id, regulator_locus_tag) %>%
    distinct() %>%
    dplyr::rename(binding_id = sample_id)

  pr_samples <-
    all_pr_dtos_list[[pr_dataset_name]][[binding_pr_set_name]]$pr %>%
    ungroup() %>%
    dplyr::select(sample_id, regulator_locus_tag) %>%
    distinct() %>%
    dplyr::rename(pr_id = sample_id)

  # Full join to identify incomplete cases - use relationship = "many-to-many"
  lookup_df <- binding_samples %>%
    full_join(
      pr_samples,
      by = "regulator_locus_tag",
      relationship = "many-to-many"
    ) %>%
    mutate(
      binding = if_else(
        !is.na(binding_id),
        file.path(
          scratch_path, pr_dataset_name, binding_pr_set_name,
          "binding", paste0(binding_id, ".csv")
        ),
        NA_character_
      ),
      pr_effect = if_else(
        !is.na(pr_id),
        file.path(
          scratch_path, pr_dataset_name, binding_pr_set_name,
          "pr", "effect", paste0(pr_id, ".csv")
        ),
        NA_character_
      ),
      pr_pvalue = if_else(
        !is.na(pr_id),
        file.path(
          scratch_path, pr_dataset_name, binding_pr_set_name,
          "pr", "pvalue", paste0(pr_id, ".csv")
        ),
        NA_character_
      )
    )

  # Separate complete and incomplete cases
  complete_lookup <- lookup_df %>%
    filter(!is.na(binding_id) & !is.na(pr_id)) %>%
    select(binding, pr_effect, pr_pvalue)

  incomplete_after_filtering <- lookup_df %>%
    filter(is.na(binding_id) | is.na(pr_id)) %>%
    mutate(missing_type = case_when(
      # NOTE(review): the "both" branch cannot occur after a full_join (every
      # row has at least one non-NA side); kept as a defensive catch-all
      is.na(binding_id) & is.na(pr_id) ~ "both",
      is.na(binding_id) ~ "binding",
      is.na(pr_id) ~ "pr",
      TRUE ~ "unknown"
    )) %>%
    select(regulator_locus_tag, binding_id, pr_id, missing_type) %>%
    # Add distinct here too to avoid duplicate incomplete rows
    distinct()

  return(list(
    lookup = complete_lookup,
    incomplete_after_filtering = incomplete_after_filtering
  ))
}

# # Write out all DTOs for all PR datasets
# lookup_results = list()
#
# dto_input_outdir = here("results/dto")
# for (pr_name in names(all_pr_dtos)) {
#   lookup_results[[pr_name]] = list()
#
#   for (binding_name in names(all_pr_dtos[[pr_name]])) {
#     write_out_pr_dto_lists(pr_name, binding_name, all_pr_dtos)
#
#     lookup_result = create_pr_lookups(pr_name, binding_name, all_pr_dtos)
#     lookup_results[[pr_name]][[binding_name]] = lookup_result
#
#     # Write complete lookups only
#     lookup_result$lookup %>%
#       write_tsv(file.path(dto_input_outdir, pr_name, binding_name, "lookup.txt"),
#                 col_names = FALSE)
#
#     # Write incomplete cases for reference
#     if (nrow(lookup_result$incomplete_after_filtering) > 0) {
#       lookup_result$incomplete_after_filtering %>%
#         write_csv(file.path(dto_input_outdir, pr_name, binding_name, "incomplete.csv"))
#     }
#   }
# }

# Summary of incomplete cases across all datasets
# incomplete_summary = map_dfr(names(lookup_results), ~{
#   map_dfr(names(lookup_results[[.x]]), function(binding_name) {
#     lookup_results[[.x]][[binding_name]]$incomplete_after_filtering %>%
#       mutate(pr_dataset = .x, binding_dataset = binding_name)
#   })
# })
# print(incomplete_summary %>% count(pr_dataset, binding_dataset, missing_type))