## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  eval = FALSE
)

## ----setup--------------------------------------------------------------------
# library(bidux)
# library(dplyr)
# library(purrr)

## ----automated-bid-pipeline---------------------------------------------------
# # Create a comprehensive BID analysis function
# analyze_dashboard_ux <- function(dashboard_config, telemetry_path = NULL) {
#   # Stage 1: Interpret (from configuration)
#   interpret_stage <- bid_interpret(
#     central_question = dashboard_config$central_question,
#     data_story = dashboard_config$data_story,
#     user_personas = dashboard_config$personas
#   )
#
#   # Stage 2 (Notice): if telemetry exists, integrate it into the workflow
#   if (!is.null(telemetry_path) && file.exists(telemetry_path)) {
#     # Ingest telemetry and identify issues
#     telemetry_issues <- bid_telemetry(telemetry_path)
#
#     # Convert the top issues to Notice stages
#     notice_stages <- bid_notices(
#       issues = telemetry_issues |>
#         filter(severity %in% c("critical", "high")) |>
#         slice_head(n = 3),
#       previous_stage = interpret_stage
#     )
#
#     # Use the most critical issue as the primary focus
#     primary_notice <- notice_stages[[1]]
#   } else {
#     # Manual problem definition if no telemetry is available
#     primary_notice <- bid_notice(
#       previous_stage = interpret_stage,
#       problem = dashboard_config$known_problems,
#       evidence = dashboard_config$evidence
#     )
#   }
#
#   # Stage 3: Anticipate with domain-specific biases
#   anticipate_stage <- bid_anticipate(
#     previous_stage = primary_notice,
#     bias_mitigations = dashboard_config$bias_mitigations %||%
#       get_domain_biases(dashboard_config$domain)
#   )
#
#   # Stage 4: Structure with telemetry flags if available
#   structure_flags <- if (
#     !is.null(telemetry_path) && file.exists(telemetry_path)
#   ) {
#     bid_flags(telemetry_issues)
#   } else {
#     NULL
#   }
#
#   structure_stage <- bid_structure(
#     previous_stage = anticipate_stage,
#     telemetry_flags = structure_flags
#   )
#
#   # Stage 5: Validate with domain-specific next steps
#   validate_stage <- bid_validate(
#     previous_stage = structure_stage,
#     summary_panel = generate_summary_panel(dashboard_config),
#     collaboration = get_collaboration_features(dashboard_config$team_size),
#     next_steps = generate_next_steps(dashboard_config, structure_stage)
#   )
#
#   return(validate_stage)
# }
#
# # Domain-specific bias patterns
# get_domain_biases <- function(domain) {
#   bias_patterns <- list(
#     "finance" = list(
#       loss_aversion = "Show both gains and losses clearly with proper context",
#       anchoring = "Provide multiple reference points (budget, previous period, industry average)",
#       confirmation_bias = "Include contrarian indicators and risk metrics"
#     ),
#     "marketing" = list(
#       attribution_bias = "Show multi-touch attribution to avoid overvaluing last-click",
#       survivorship_bias = "Include data on discontinued campaigns and failed experiments",
#       framing = "Toggle between cost-per-acquisition and customer-lifetime-value views"
#     ),
#     "operations" = list(
#       availability_bias = "Surface less-visible but important operational metrics",
#       recency_bias = "Balance recent performance with longer-term trends",
#       cognitive_load = "Use progressive disclosure for complex operational dashboards"
#     )
#   )
#
#   return(
#     bias_patterns[[domain]] %||%
#       list(
#         anchoring = "Provide appropriate reference points",
#         framing = "Consider multiple perspectives on the same data",
#         confirmation_bias = "Include challenging or contrarian data points"
#       )
#   )
# }
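#
# # The pipeline above leans on three helpers that this vignette does not
# # define. The sketch below is one minimal, hypothetical implementation,
# # assuming only the `dashboard_config` fields used earlier (name,
# # central_question, team_size); adapt the wording to your own projects.
# generate_summary_panel <- function(dashboard_config) {
#   return(paste0(
#     "Key takeaways for ", dashboard_config$name, ": answers '",
#     dashboard_config$central_question, "' at a glance"
#   ))
# }
#
# get_collaboration_features <- function(team_size) {
#   if (is.null(team_size) || team_size <= 3) {
#     return("Shared annotations on key charts")
#   }
#   return("Shared annotations, scheduled report snapshots, and comment threads")
# }
#
# generate_next_steps <- function(dashboard_config, structure_stage) {
#   return(c(
#     "Review the chosen layout with stakeholders",
#     paste("Usability-test the", dashboard_config$name, "with target personas"),
#     "Schedule a telemetry review after deployment"
#   ))
# }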
#
# # Batch analyze multiple dashboards
# analyze_dashboard_portfolio <- function(dashboard_configs) {
#   results <- map(dashboard_configs, analyze_dashboard_ux)
#   names(results) <- map_chr(dashboard_configs, "name")
#
#   # Generate portfolio-level insights
#   portfolio_summary <- summarize_portfolio_ux(results)
#
#   return(
#     list(
#       individual_analyses = results,
#       portfolio_summary = portfolio_summary,
#       improvement_priorities = rank_improvement_opportunities(results)
#     )
#   )
# }
#
# # Example usage
# dashboard_portfolio <- list(
#   list(
#     name = "Executive Dashboard",
#     domain = "finance",
#     central_question = "How is the business performing this quarter?",
#     data_story = list(
#       hook = "Quarterly performance varies significantly across business units",
#       context = "Board meeting preparation requires a clear performance narrative",
#       tension = "Current reports are too detailed for executive review",
#       resolution = "Provide an executive summary with drill-down capability"
#     ),
#     personas = list(
#       list(
#         name = "CEO",
#         technical_level = "Basic",
#         time_constraints = "5 minutes"
#       ),
#       list(
#         name = "CFO",
#         technical_level = "Intermediate",
#         focus = "Financial metrics"
#       )
#     ),
#     known_problems = "Information overload in current quarterly reviews",
#     evidence = "Board meetings consistently run over time due to data interpretation"
#   )
#   # Additional dashboard configurations would follow here...
# )
#
# # Run the portfolio analysis
# portfolio_results <- analyze_dashboard_portfolio(dashboard_portfolio)

## ----custom-concepts----------------------------------------------------------
# # Add domain-specific behavioral concepts
# add_custom_concepts <- function() {
#   # Define custom concepts for your domain
#   custom_finance_concepts <- tibble(
#     concept = c(
#       "Risk Perception Bias",
#       "Mental Accounting",
#       "Temporal Discounting"
#     ),
#     category = "Financial Psychology",
#     description = c(
#       "Tendency to perceive identical risks differently based on presentation context",
#       "Treating money differently based on its source or intended use",
#       "Overvaluing immediate rewards relative to future benefits"
#     ),
#     implementation_tips = c(
#       "Present risks in multiple formats (percentages, frequencies, visual scales)",
#       "Show total portfolio impact rather than individual position P&L",
#       "Include time-based context and compound effect visualizations"
#     ),
#     shiny_components = c(
#       "plotly for interactive risk visualization, bslib progress bars for probability",
#       "DT tables with conditional formatting, reactable grouping features",
#       "echarts4r timeline components, animated value transitions"
#     )
#   )
#
#   # You could extend the package concept dictionary (advanced users only);
#   # doing so requires a package development workflow.
#
#   return(custom_finance_concepts)
# }
#
# # Create domain-specific BID analysis functions
# analyze_financial_dashboard <- function(config, custom_concepts = NULL) {
#   # Narrow custom concepts to those relevant for this config, if provided
#   if (!is.null(custom_concepts)) {
#     relevant_concepts <- filter(
#       custom_concepts,
#       grepl(config$domain_keywords, concept, ignore.case = TRUE)
#     )
#   }
#
#   # Apply the standard BID workflow with custom extensions
#   result <- analyze_dashboard_ux(config)
#
#   # Add domain-specific analysis
#   result$domain_insights <- generate_domain_insights(result, custom_concepts)
#   result$specialized_suggestions <- get_domain_suggestions(
#     result,
#     config$domain
#   )
#
#   return(result)
# }
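#
# # A usage sketch for the two functions above, assuming a config shaped like
# # the portfolio entries in the previous chunk plus a `domain_keywords`
# # pattern for matching concepts (all values here are illustrative):
# finance_config <- list(
#   name = "Risk Dashboard",
#   domain = "finance",
#   domain_keywords = "risk|accounting",
#   central_question = "Where is our risk exposure concentrated?",
#   data_story = list(
#     hook = "Exposure is drifting toward a single counterparty",
#     context = "Risk committee reviews happen monthly",
#     tension = "Current views hide concentration behind averages",
#     resolution = "Lead with concentration metrics and drill-downs"
#   ),
#   personas = list(
#     list(name = "Chief Risk Officer", technical_level = "Intermediate")
#   ),
#   known_problems = "Concentration risk is buried in detail tables",
#   evidence = "Committee members repeatedly ask for the same rollup"
# )
#
# financial_result <- analyze_financial_dashboard(
#   config = finance_config,
#   custom_concepts = add_custom_concepts()
# )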
#
# # Generate domain-specific insights
# generate_domain_insights <- function(bid_result, custom_concepts = NULL) {
#   insights <- list()
#
#   # Analyze the layout choice against domain best practices
#   layout <- bid_result$layout[1]
#
#   if (layout == "dual_process") {
#     insights$layout_analysis <- "Dual-process layout chosen. Good for financial dashboards requiring both summary and detailed analysis."
#   }
#
#   # Check for domain-specific bias considerations
#   if (!is.null(custom_concepts)) {
#     # Suggest additional bias mitigations based on custom concepts
#     insights$additional_biases <- suggest_domain_biases(
#       bid_result,
#       custom_concepts
#     )
#   }
#
#   return(insights)
# }

## ----ux-ab-testing------------------------------------------------------------
# # Framework for testing UX improvements
# design_ux_experiment <- function(current_design, proposed_design, hypothesis) {
#   experiment_design <- list(
#     hypothesis = hypothesis,
#     primary_metrics = c(
#       "time_to_first_interaction",
#       "task_completion_rate",
#       "user_satisfaction_score",
#       "session_duration"
#     ),
#     secondary_metrics = c(
#       "error_rate",
#       "feature_adoption_rate",
#       "return_visit_rate"
#     ),
#     variants = list(
#       control = current_design,
#       treatment = proposed_design
#     ),
#     sample_size_calculation = calculate_ux_sample_size(
#       baseline_completion_rate = 0.65,
#       minimum_detectable_effect = 0.10,
#       power = 0.80,
#       alpha = 0.05
#     )
#   )
#
#   return(experiment_design)
# }
#
# # Calculate the required sample size for UX experiments
# calculate_ux_sample_size <- function(
#     baseline_completion_rate,
#     minimum_detectable_effect,
#     power = 0.80,
#     alpha = 0.05) {
#   # Power analysis for a two-sample proportion test
#   p1 <- baseline_completion_rate
#   p2 <- p1 + minimum_detectable_effect
#
#   # Simplified calculation (use power.prop.test() for a precise answer)
#   pooled_p <- (p1 + p2) / 2
#   pooled_variance <- pooled_p * (1 - pooled_p)
#
#   z_alpha <- qnorm(1 - alpha / 2)
#   z_beta <- qnorm(power)
#
#   n_per_group <- 2 * pooled_variance * (z_alpha + z_beta)^2 / (p2 - p1)^2
#
#   return(
#     list(
#       n_per_group = ceiling(n_per_group),
#       total_n = ceiling(2 * n_per_group),
#       assumptions = list(
#         baseline_rate = p1,
#         target_rate = p2,
#         effect_size = minimum_detectable_effect
#       )
#     )
#   )
# }
#
# # Analyze UX experiment results
# analyze_ux_experiment <- function(experiment_data, experiment_design) {
#   # Primary analysis: task completion rate
#   completion_test <- prop.test(
#     x = c(
#       sum(experiment_data$control$completed),
#       sum(experiment_data$treatment$completed)
#     ),
#     n = c(nrow(experiment_data$control), nrow(experiment_data$treatment))
#   )
#
#   # Secondary analysis: time to completion
#   time_test <- t.test(
#     experiment_data$treatment$completion_time,
#     experiment_data$control$completion_time,
#     alternative = "less" # Hypothesis: the treatment is faster
#   )
#
#   # Effect size calculation
#   effect_size <- calculate_cohens_d(
#     experiment_data$treatment$completion_time,
#     experiment_data$control$completion_time
#   )
#
#   results <- list(
#     completion_rate_test = completion_test,
#     completion_time_test = time_test,
#     effect_size = effect_size,
#     practical_significance = assess_practical_significance(
#       completion_test,
#       time_test,
#       effect_size
#     ),
#     recommendation = generate_experiment_recommendation(
#       completion_test,
#       time_test,
#       effect_size
#     )
#   )
#
#   return(results)
# }
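#
# # calculate_cohens_d() is referenced above but never defined in this
# # vignette. A minimal sketch using the pooled-standard-deviation form of
# # Cohen's d (assuming both inputs are numeric vectors with n >= 2):
# calculate_cohens_d <- function(x, y) {
#   nx <- length(x)
#   ny <- length(y)
#   pooled_sd <- sqrt(
#     ((nx - 1) * var(x) + (ny - 1) * var(y)) / (nx + ny - 2)
#   )
#   # Positive values mean x has the larger mean
#   return((mean(x) - mean(y)) / pooled_sd)
# }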
#
# # Example: Test progressive disclosure vs. full information display
# progressive_disclosure_experiment <- function() {
#   # Current design: all information visible
#   current_design <- list(
#     name = "Full Information Display",
#     description = "All metrics and filters visible simultaneously",
#     implementation = "Traditional dashboard with all components loaded"
#   )
#
#   # Proposed design: progressive disclosure
#   proposed_design <- list(
#     name = "Progressive Disclosure",
#     description = "Key metrics first, additional details on request",
#     implementation = "Primary KPIs with 'Show Details' interactions"
#   )
#
#   # Hypothesis based on the BID framework
#   hypothesis <- "Progressive disclosure will reduce cognitive load and improve task completion rate for dashboard users (based on Cognitive Load Theory and Choice Overload research)"
#
#   experiment <- design_ux_experiment(
#     current_design = current_design,
#     proposed_design = proposed_design,
#     hypothesis = hypothesis
#   )
#
#   return(experiment)
# }

## ----continuous-monitoring----------------------------------------------------
# # Create a UX health monitoring system
# create_ux_monitoring_system <- function(
#     dashboard_configs,
#     telemetry_connections) {
#   monitoring_system <- list(
#     dashboards = dashboard_configs,
#     telemetry_sources = telemetry_connections,
#     health_checks = define_ux_health_checks(),
#     alert_thresholds = define_alert_thresholds(),
#     reporting_schedule = "weekly"
#   )
#
#   return(monitoring_system)
# }
#
# # Define UX health check metrics
# define_ux_health_checks <- function() {
#   list(
#     cognitive_load_indicators = c(
#       "session_abandonment_rate",
#       "time_to_first_interaction",
#       "filter_usage_distribution",
#       "error_rate_by_component"
#     ),
#     user_success_metrics = c(
#       "task_completion_rate",
#       "time_to_insight",
#       "feature_adoption_rate",
#       "user_satisfaction_nps"
#     ),
#     behavioral_red_flags = c(
#       "rapid_repeated_clicks",
#       "excessive_back_navigation",
#       "long_pause_before_action",
#       "high_exit_rate_on_entry"
#     )
#   )
# }
#
# # Automated UX health reporting
# generate_ux_health_report <- function(monitoring_system, time_period = "week") {
#   health_data <- map(
#     monitoring_system$telemetry_sources,
#     function(source) {
#       issues <- bid_telemetry(source$path, time_filter = time_period)
#
#       health_scores <- calculate_ux_health_scores(issues)
#       trend_analysis <- calculate_ux_trends(issues, source$historical_data)
#
#       list(
#         dashboard = source$dashboard_name,
#         current_health = health_scores,
#         trends = trend_analysis,
#         recommendations = generate_health_recommendations(
#           health_scores,
#           trend_analysis
#         )
#       )
#     }
#   )
#
#   # Portfolio-level insights
#   portfolio_health <- aggregate_portfolio_health(health_data)
#
#   # Generate an executive summary
#   executive_summary <- create_ux_executive_summary(portfolio_health)
#
#   report <- list(
#     period = time_period,
#     executive_summary = executive_summary,
#     dashboard_details = health_data,
#     portfolio_trends = portfolio_health,
#     action_items = prioritize_ux_improvements(health_data)
#   )
#
#   return(report)
# }
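#
# # aggregate_portfolio_health() is called above but not defined here. One
# # minimal, hypothetical version, assuming each element of `health_data`
# # carries the `dashboard` name and the `current_health` list produced by
# # calculate_ux_health_scores() below:
# aggregate_portfolio_health <- function(health_data) {
#   scores <- map_dbl(health_data, function(d) d$current_health$score)
#
#   return(
#     list(
#       per_dashboard = tibble(
#         dashboard = map_chr(health_data, "dashboard"),
#         health_score = scores
#       ),
#       portfolio_mean = mean(scores),
#       weakest_dashboard = health_data[[which.min(scores)]]$dashboard
#     )
#   )
# }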
#
# # Calculate UX health scores
# calculate_ux_health_scores <- function(telemetry_issues) {
#   # Weight issues by severity and impact
#   issue_impact <- telemetry_issues |>
#     mutate(
#       weighted_impact = case_when(
#         severity == "critical" ~ 5,
#         severity == "high" ~ 3,
#         severity == "medium" ~ 2,
#         TRUE ~ 1
#       )
#     ) |>
#     summarize(
#       total_issues = n(),
#       weighted_impact_score = sum(weighted_impact),
#       critical_issues = sum(severity == "critical"),
#       .groups = "drop"
#     )
#
#   # Calculate a health score (0-100, higher is better)
#   health_score <- pmax(0, 100 - (issue_impact$weighted_impact_score * 2))
#
#   health_rating <- case_when(
#     health_score >= 85 ~ "Excellent",
#     health_score >= 70 ~ "Good",
#     health_score >= 55 ~ "Fair",
#     TRUE ~ "Needs Attention"
#   )
#
#   return(
#     list(
#       score = health_score,
#       rating = health_rating,
#       issue_breakdown = issue_impact,
#       primary_concerns = get_primary_concerns(telemetry_issues)
#     )
#   )
# }
#
# # Example implementation
# monitor_dashboard_portfolio <- function() {
#   # Set up monitoring for multiple dashboards
#   portfolio_monitoring <- create_ux_monitoring_system(
#     dashboard_configs = list(
#       list(name = "Executive Dashboard", business_unit = "Corporate"),
#       list(name = "Sales Analytics", business_unit = "Sales"),
#       list(name = "Marketing Performance", business_unit = "Marketing")
#     ),
#     telemetry_connections = list(
#       list(
#         dashboard_name = "Executive Dashboard",
#         path = "exec_dashboard_telemetry.sqlite",
#         historical_data = "exec_dashboard_history.rds"
#       ),
#       list(
#         dashboard_name = "Sales Analytics",
#         path = "sales_dashboard_telemetry.sqlite",
#         historical_data = "sales_dashboard_history.rds"
#       )
#     )
#   )
#
#   # Generate the weekly health report
#   weekly_report <- generate_ux_health_report(portfolio_monitoring)
#
#   return(weekly_report)
# }

## ----custom-bid-stages---------------------------------------------------------
# # Create a custom BID stage for specific domains
# create_custom_bid_stage <- function(
#     stage_name,
#     stage_function,
#     validation_rules) {
#   # Example: security-focused BID stage for sensitive-data dashboards
#   bid_security_stage <- function(
#       previous_stage,
#       security_requirements = NULL,
#       compliance_framework = "GDPR",
#       data_sensitivity_level = "medium") {
#     validate_previous_stage(previous_stage, stage_name)
#
#     # Security-specific analysis
#     security_analysis <- assess_dashboard_security_ux(
#       previous_stage = previous_stage,
#       requirements = security_requirements,
#       framework = compliance_framework,
#       sensitivity = data_sensitivity_level
#     )
#
#     # Generate security-aware UX recommendations
#     security_recommendations <- generate_security_ux_recommendations(
#       security_analysis,
#       previous_stage
#     )
#
#     # Create the result tibble
#     result_data <- tibble(
#       stage = stage_name,
#       security_level = data_sensitivity_level,
#       compliance_framework = compliance_framework,
#       security_recommendations = paste(
#         security_recommendations,
#         collapse = "; "
#       ),
#       previous_layout = safe_column_access(previous_stage, "layout"),
#       timestamp = Sys.time()
#     )
#
#     # Return as a bid_stage object
#     return(bid_stage(stage_name, result_data))
#   }
#
#   return(bid_security_stage)
# }
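#
# # validate_previous_stage() is used by both custom stages but is not
# # defined in this vignette. A minimal, hypothetical guard, assuming
# # bid_stage objects carry the S3 class "bid_stage":
# validate_previous_stage <- function(previous_stage, stage_name) {
#   if (!inherits(previous_stage, "bid_stage")) {
#     stop(
#       sprintf(
#         "Stage '%s' requires a bid_stage object as `previous_stage`",
#         stage_name
#       ),
#       call. = FALSE
#     )
#   }
#   invisible(previous_stage)
# }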
#
# # Example: Accessibility-focused BID stage
# bid_accessibility <- function(
#     previous_stage,
#     wcag_level = "AA",
#     assistive_tech_support = TRUE,
#     target_disabilities = c("visual", "motor", "cognitive")) {
#   validate_previous_stage(previous_stage, "Accessibility")
#
#   # Comprehensive accessibility analysis
#   accessibility_audit <- perform_accessibility_audit(
#     previous_stage = previous_stage,
#     wcag_level = wcag_level,
#     target_disabilities = target_disabilities
#   )
#
#   # Generate specific recommendations
#   a11y_recommendations <- generate_accessibility_recommendations(
#     audit_results = accessibility_audit,
#     layout = safe_column_access(previous_stage, "layout"),
#     existing_concepts = safe_column_access(previous_stage, "concepts")
#   )
#
#   result_data <- tibble(
#     stage = "Accessibility",
#     wcag_level = wcag_level,
#     assistive_tech_support = assistive_tech_support,
#     accessibility_score = accessibility_audit$overall_score,
#     recommendations = paste(a11y_recommendations, collapse = "; "),
#     critical_issues = accessibility_audit$critical_issues_count,
#     timestamp = Sys.time()
#   )
#
#   return(bid_stage("Accessibility", result_data))
# }
#
# # Integration with the main BID workflow
# extended_bid_workflow <- function(config) {
#   # Standard BID stages
#   interpret_stage <- bid_interpret(
#     central_question = config$central_question,
#     data_story = config$data_story
#   )
#
#   notice_stage <- bid_notice(
#     previous_stage = interpret_stage,
#     problem = config$problem,
#     evidence = config$evidence
#   )
#
#   anticipate_stage <- bid_anticipate(
#     previous_stage = notice_stage,
#     bias_mitigations = config$bias_mitigations
#   )
#
#   structure_stage <- bid_structure(previous_stage = anticipate_stage)
#
#   # Custom stages
#   if (config$include_accessibility) {
#     accessibility_stage <- bid_accessibility(
#       previous_stage = structure_stage,
#       wcag_level = config$accessibility_requirements$wcag_level
#     )
#     final_stage <- accessibility_stage
#   } else {
#     final_stage <- structure_stage
#   }
#
#   # Validation with all insights
#   validate_stage <- bid_validate(
#     previous_stage = final_stage,
#     summary_panel = config$summary_panel,
#     next_steps = config$next_steps
#   )
#
#   return(validate_stage)
# }

## ----data-science-integration--------------------------------------------------
# # Integrate BID into a standard data science project structure
# create_bid_project_template <- function(
#     project_name,
#     project_type = "dashboard") {
#   project_structure <- list(
#     "01-data-exploration/" = "Standard EDA and data validation",
#     "02-user-research/" = "BID Stage 1 (Interpret) - user needs analysis",
#     "03-problem-identification/" = "BID Stage 2 (Notice) - friction point analysis",
#     "04-behavioral-analysis/" = "BID Stage 3 (Anticipate) - bias mitigation planning",
#     "05-interface-design/" = "BID Stage 4 (Structure) - layout and UX design",
#     "06-validation-testing/" = "BID Stage 5 (Validate) - user testing and iteration",
#     "07-telemetry-analysis/" = "Post-deployment UX monitoring and improvement",
#     "bid_analysis.R" = "Consolidated BID framework application",
#     "ux_monitoring.R" = "Automated UX health monitoring",
#     "README.md" = "Project documentation including BID insights"
#   )
#
#   return(project_structure)
# }
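#
# # A small usage sketch: materialize the template on disk. This assumes
# # names in `project_structure` ending in "/" are directories and everything
# # else is a file; both helpers here are base R only.
# scaffold_bid_project <- function(project_name, ...) {
#   structure <- create_bid_project_template(project_name, ...)
#   dir.create(project_name, showWarnings = FALSE)
#
#   for (entry in names(structure)) {
#     target <- file.path(project_name, entry)
#     if (endsWith(entry, "/")) {
#       dir.create(target, showWarnings = FALSE, recursive = TRUE)
#     } else {
#       # Seed each file with its one-line description as a comment
#       writeLines(paste("#", structure[[entry]]), target)
#     }
#   }
#
#   invisible(project_name)
# }
#
# scaffold_bid_project("quarterly-kpi-dashboard")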
#
# # Template for BID-informed data science projects. The phases are built
# # sequentially so each BID stage can reference the previous one (a single
# # self-referencing list() call would not work in R).
# bid_data_science_workflow <- function(project_config) {
#   # Phase 1: Data + user understanding
#   user_research <- bid_interpret(
#     central_question = project_config$research_question,
#     data_story = project_config$data_narrative,
#     user_personas = project_config$stakeholders
#   )
#   phase_1 <- list(
#     data_exploration = "Standard EDA process",
#     user_research = user_research
#   )
#
#   # Phase 2: Problem definition
#   ux_problem_identification <- bid_notice(
#     previous_stage = user_research,
#     problem = project_config$interface_challenges,
#     evidence = project_config$user_feedback
#   )
#   phase_2 <- list(
#     statistical_analysis = "Model building and validation",
#     ux_problem_identification = ux_problem_identification
#   )
#
#   # Phase 3: Solution design
#   behavioral_considerations <- bid_anticipate(
#     previous_stage = ux_problem_identification,
#     bias_mitigations = project_config$cognitive_considerations
#   )
#   interface_structure <- bid_structure(
#     previous_stage = behavioral_considerations
#   )
#   phase_3 <- list(
#     model_interpretation = "Feature importance and model explanation",
#     behavioral_considerations = behavioral_considerations,
#     interface_structure = interface_structure
#   )
#
#   # Phase 4: Validation & deployment
#   ux_validation <- bid_validate(
#     previous_stage = interface_structure,
#     summary_panel = project_config$success_criteria,
#     next_steps = project_config$iteration_plan
#   )
#   phase_4 <- list(
#     model_validation = "Standard model performance validation",
#     ux_validation = ux_validation
#   )
#
#   workflow <- list(
#     phase_1 = phase_1,
#     phase_2 = phase_2,
#     phase_3 = phase_3,
#     phase_4 = phase_4
#   )
#
#   return(workflow)
# }

## ----documentation-practices---------------------------------------------------
# # Create comprehensive BID documentation
# document_bid_decisions <- function(bid_result, project_context) {
#   documentation <- list(
#     project_overview = project_context,
#     bid_stages_summary = extract_bid_summary(bid_result),
#     key_decisions = extract_key_decisions(bid_result),
#     behavioral_science_rationale = extract_behavioral_rationale(bid_result),
#     implementation_guidelines = generate_implementation_guide(bid_result),
#     success_metrics = define_success_metrics(bid_result),
#     iteration_plan = create_iteration_plan(bid_result)
#   )
#
#   return(documentation)
# }

## ----collaborative-workflows---------------------------------------------------
# # Enable team collaboration on BID analysis
# create_bid_collaboration_workflow <- function(team_members, project_config) {
#   workflow <- list(
#     stakeholder_input = collect_stakeholder_perspectives(team_members),
#     expert_review = facilitate_expert_review_process(project_config),
#     consensus_building = build_consensus_on_bid_decisions(team_members),
#     implementation_coordination = coordinate_implementation_tasks(team_members)
#   )
#
#   return(workflow)
# }

## ----continuous-learning-------------------------------------------------------
# # Build organizational BID knowledge
# build_bid_knowledge_base <- function(completed_projects) {
#   knowledge_base <- map_dfr(
#     completed_projects,
#     function(project) {
#       extract_lessons_learned(project$bid_analysis, project$outcomes)
#     }
#   )
#
#   # Identify patterns and best practices
#   patterns <- identify_successful_patterns(knowledge_base)
#   anti_patterns <- identify_problematic_patterns(knowledge_base)
#
#   return(
#     list(
#       knowledge_base = knowledge_base,
#       successful_patterns = patterns,
#       anti_patterns = anti_patterns,
#       recommendations = generate_org_recommendations(patterns, anti_patterns)
#     )
#   )
# }
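#
# # identify_successful_patterns() is left undefined above. A minimal,
# # hypothetical sketch, assuming extract_lessons_learned() returns one row
# # per project with `layout`, `outcome_score`, and `lesson` columns (these
# # column names are illustrative, not part of the bidux API):
# identify_successful_patterns <- function(knowledge_base, min_projects = 3) {
#   knowledge_base |>
#     group_by(layout) |>
#     summarize(
#       projects = n(),
#       mean_outcome = mean(outcome_score, na.rm = TRUE),
#       example_lesson = first(lesson),
#       .groups = "drop"
#     ) |>
#     filter(projects >= min_projects) |>
#     arrange(desc(mean_outcome))
# }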