[
  {
    "title": "Summarization",
    "header": [
      {
        "value": "Model/adapter",
        "markdown": false,
        "metadata": {}
      },
      {
        "value": "Summarization - Faithfulness",
        "description": "summarization\n\nFaithfulness: Whether all the information expressed by the summary can be inferred from the source transcript.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Faithfulness",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization - Relevance",
        "description": "summarization\n\nRelevance: Whether the summary includes only important information from the source.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Relevance",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization - Coherence",
        "description": "summarization\n\nCoherence: Whether the summary organizes the relevant information into a well-structured summary.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Coherence",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization (Real) - Faithfulness",
        "description": "Summarization with real call transcripts\n\nFaithfulness: Whether all the information expressed by the summary can be inferred from the source transcript.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Faithfulness",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Real) - Relevance",
        "description": "Summarization with real call transcripts\n\nRelevance: Whether the summary includes only important information from the source.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Relevance",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Real) - Coherence",
        "description": "Summarization with real call transcripts\n\nCoherence: Whether the summary organizes the relevant information into a well-structured summary.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Coherence",
          "run_group": "Summarization (Real)"
        }
      }
    ],
    "rows": [
      [
        {
          "value": "Llama 3 Instruct (70B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        }
      ],
      [
        {
          "value": "Llama 3 Instruct (8B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        }
      ],
      [
        {
          "value": "Claude 3.5 Sonnet (20240620)",
          "description": "",
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        }
      ],
      [
        {
          "value": "GPT-4o (2024-05-13)",
          "description": "",
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.95,
          "description": "min=0.95, mean=0.95, max=0.95, sum=0.95 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        }
      ],
      [
        {
          "value": "GPT-4o mini (2024-07-18)",
          "description": "",
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        }
      ]
    ],
    "links": [
      {
        "text": "LaTeX",
        "href": "benchmark_output/releases/v0.1.0/groups/latex/call_center_scenarios_summarization_metrics.tex"
      },
      {
        "text": "JSON",
        "href": "benchmark_output/releases/v0.1.0/groups/json/call_center_scenarios_summarization_metrics.json"
      }
    ],
    "name": "summarization_metrics"
  },
  {
    "title": "Efficiency",
    "header": [
      {
        "value": "Model/adapter",
        "markdown": false,
        "metadata": {}
      },
      {
        "value": "Mean win rate",
        "description": "How many models this model outperforms on average (over columns).",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {}
      }
    ],
    "rows": [
      [
        {
          "value": "Llama 3 Instruct (70B)",
          "description": "",
          "markdown": false
        },
        {
          "markdown": false
        }
      ],
      [
        {
          "value": "Llama 3 Instruct (8B)",
          "description": "",
          "markdown": false
        },
        {
          "markdown": false
        }
      ],
      [
        {
          "value": "Claude 3.5 Sonnet (20240620)",
          "description": "",
          "markdown": false
        },
        {
          "markdown": false
        }
      ],
      [
        {
          "value": "GPT-4o (2024-05-13)",
          "description": "",
          "markdown": false
        },
        {
          "markdown": false
        }
      ],
      [
        {
          "value": "GPT-4o mini (2024-07-18)",
          "description": "",
          "markdown": false
        },
        {
          "markdown": false
        }
      ]
    ],
    "links": [
      {
        "text": "LaTeX",
        "href": "benchmark_output/releases/v0.1.0/groups/latex/call_center_scenarios_efficiency.tex"
      },
      {
        "text": "JSON",
        "href": "benchmark_output/releases/v0.1.0/groups/json/call_center_scenarios_efficiency.json"
      }
    ],
    "name": "efficiency"
  },
  {
    "title": "General information",
    "header": [
      {
        "value": "Model/adapter",
        "markdown": false,
        "metadata": {}
      },
      {
        "value": "Summarization - # eval",
        "description": "summarization\n\n# eval: Number of evaluation instances.",
        "markdown": false,
        "metadata": {
          "metric": "# eval",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization - # train",
        "description": "summarization\n\n# train: Number of training instances (e.g., in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "# train",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization - truncated",
        "description": "summarization\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "truncated",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization - # prompt tokens",
        "description": "summarization\n\n# prompt tokens: Number of tokens in the prompt.",
        "markdown": false,
        "metadata": {
          "metric": "# prompt tokens",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization - # output tokens",
        "description": "summarization\n\n# output tokens: Actual number of output tokens.",
        "markdown": false,
        "metadata": {
          "metric": "# output tokens",
          "run_group": "Summarization"
        }
      },
      {
        "value": "Summarization (Real) - # eval",
        "description": "Summarization with real call transcripts\n\n# eval: Number of evaluation instances.",
        "markdown": false,
        "metadata": {
          "metric": "# eval",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Real) - # train",
        "description": "Summarization with real call transcripts\n\n# train: Number of training instances (e.g., in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "# train",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Real) - truncated",
        "description": "Summarization with real call transcripts\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "truncated",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Real) - # prompt tokens",
        "description": "Summarization with real call transcripts\n\n# prompt tokens: Number of tokens in the prompt.",
        "markdown": false,
        "metadata": {
          "metric": "# prompt tokens",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Real) - # output tokens",
        "description": "Summarization with real call transcripts\n\n# output tokens: Actual number of output tokens.",
        "markdown": false,
        "metadata": {
          "metric": "# output tokens",
          "run_group": "Summarization (Real)"
        }
      },
      {
        "value": "Summarization (Pairwise) - # eval",
        "description": "summarization\n\n# eval: Number of evaluation instances.",
        "markdown": false,
        "metadata": {
          "metric": "# eval",
          "run_group": "Summarization (Pairwise)"
        }
      },
      {
        "value": "Summarization (Pairwise) - # train",
        "description": "summarization\n\n# train: Number of training instances (e.g., in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "# train",
          "run_group": "Summarization (Pairwise)"
        }
      },
      {
        "value": "Summarization (Pairwise) - truncated",
        "description": "summarization\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "truncated",
          "run_group": "Summarization (Pairwise)"
        }
      },
      {
        "value": "Summarization (Pairwise) - # prompt tokens",
        "description": "summarization\n\n# prompt tokens: Number of tokens in the prompt.",
        "markdown": false,
        "metadata": {
          "metric": "# prompt tokens",
          "run_group": "Summarization (Pairwise)"
        }
      },
      {
        "value": "Summarization (Pairwise) - # output tokens",
        "description": "summarization\n\n# output tokens: Actual number of output tokens.",
        "markdown": false,
        "metadata": {
          "metric": "# output tokens",
          "run_group": "Summarization (Pairwise)"
        }
      },
      {
        "value": "Summarization (Key Points Recall) - # eval",
        "description": "summarization\n\n# eval: Number of evaluation instances.",
        "markdown": false,
        "metadata": {
          "metric": "# eval",
          "run_group": "Summarization (Key Points Recall)"
        }
      },
      {
        "value": "Summarization (Key Points Recall) - # train",
        "description": "summarization\n\n# train: Number of training instances (e.g., in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "# train",
          "run_group": "Summarization (Key Points Recall)"
        }
      },
      {
        "value": "Summarization (Key Points Recall) - truncated",
        "description": "summarization\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "truncated",
          "run_group": "Summarization (Key Points Recall)"
        }
      },
      {
        "value": "Summarization (Key Points Recall) - # prompt tokens",
        "description": "summarization\n\n# prompt tokens: Number of tokens in the prompt.",
        "markdown": false,
        "metadata": {
          "metric": "# prompt tokens",
          "run_group": "Summarization (Key Points Recall)"
        }
      },
      {
        "value": "Summarization (Key Points Recall) - # output tokens",
        "description": "summarization\n\n# output tokens: Actual number of output tokens.",
        "markdown": false,
        "metadata": {
          "metric": "# output tokens",
          "run_group": "Summarization (Key Points Recall)"
        }
      }
    ],
    "rows": [
      [
        {
          "value": "Llama 3 Instruct (70B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 731.5833333333334,
          "description": "min=731.583, mean=731.583, max=731.583, sum=731.583 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 169.04166666666666,
          "description": "min=169.042, mean=169.042, max=169.042, sum=169.042 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 5.0,
          "description": "min=5, mean=5, max=5, sum=5 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 1688.0,
          "description": "min=1688, mean=1688, max=1688, sum=1688 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 171.0,
          "description": "min=171, mean=171, max=171, sum=171 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 731.5833333333334,
          "description": "min=731.583, mean=731.583, max=731.583, sum=731.583 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 169.7125,
          "description": "min=169.713, mean=169.713, max=169.713, sum=169.713 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 731.5833333333334,
          "description": "min=731.583, mean=731.583, max=731.583, sum=731.583 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-70b-chat"
          ]
        },
        {
          "value": 169.7125,
          "description": "min=169.713, mean=169.713, max=169.713, sum=169.713 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-70b-chat"
          ]
        }
      ],
      [
        {
          "value": "Llama 3 Instruct (8B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 731.5833333333334,
          "description": "min=731.583, mean=731.583, max=731.583, sum=731.583 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 173.4125,
          "description": "min=173.412, mean=173.412, max=173.412, sum=173.412 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 5.0,
          "description": "min=5, mean=5, max=5, sum=5 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 1688.0,
          "description": "min=1688, mean=1688, max=1688, sum=1688 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 185.2,
          "description": "min=185.2, mean=185.2, max=185.2, sum=185.2 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 731.5833333333334,
          "description": "min=731.583, mean=731.583, max=731.583, sum=731.583 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 174.11666666666667,
          "description": "min=174.117, mean=174.117, max=174.117, sum=174.117 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 731.5833333333334,
          "description": "min=731.583, mean=731.583, max=731.583, sum=731.583 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-8b-chat"
          ]
        },
        {
          "value": 174.11666666666667,
          "description": "min=174.117, mean=174.117, max=174.117, sum=174.117 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-8b-chat"
          ]
        }
      ],
      [
        {
          "value": "Claude 3.5 Sonnet (20240620)",
          "description": "",
          "markdown": false
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 801.1083333333333,
          "description": "min=801.108, mean=801.108, max=801.108, sum=801.108 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 151.3,
          "description": "min=151.3, mean=151.3, max=151.3, sum=151.3 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 5.0,
          "description": "min=5, mean=5, max=5, sum=5 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 1726.4,
          "description": "min=1726.4, mean=1726.4, max=1726.4, sum=1726.4 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 173.0,
          "description": "min=173, mean=173, max=173, sum=173 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 801.1083333333333,
          "description": "min=801.108, mean=801.108, max=801.108, sum=801.108 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 151.10833333333332,
          "description": "min=151.108, mean=151.108, max=151.108, sum=151.108 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 801.1083333333333,
          "description": "min=801.108, mean=801.108, max=801.108, sum=801.108 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        },
        {
          "value": 151.10833333333332,
          "description": "min=151.108, mean=151.108, max=151.108, sum=151.108 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        }
      ],
      [
        {
          "value": "GPT-4o (2024-05-13)",
          "description": "",
          "markdown": false
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 728.7625,
          "description": "min=728.763, mean=728.763, max=728.763, sum=728.763 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 155.52916666666667,
          "description": "min=155.529, mean=155.529, max=155.529, sum=155.529 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 5.0,
          "description": "min=5, mean=5, max=5, sum=5 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 1642.6,
          "description": "min=1642.6, mean=1642.6, max=1642.6, sum=1642.6 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 142.8,
          "description": "min=142.8, mean=142.8, max=142.8, sum=142.8 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 728.7625,
          "description": "min=728.763, mean=728.763, max=728.763, sum=728.763 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "value": 154.72916666666666,
          "description": "min=154.729, mean=154.729, max=154.729, sum=154.729 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-2024-05-13"
          ]
        },
        {
          "description": "No matching runs",
          "markdown": false
        },
        {
          "description": "No matching runs",
          "markdown": false
        },
        {
          "description": "No matching runs",
          "markdown": false
        },
        {
          "description": "No matching runs",
          "markdown": false
        },
        {
          "description": "No matching runs",
          "markdown": false
        }
      ],
      [
        {
          "value": "GPT-4o mini (2024-07-18)",
          "description": "",
          "markdown": false
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 728.7625,
          "description": "min=728.763, mean=728.763, max=728.763, sum=728.763 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 178.33333333333334,
          "description": "min=178.333, mean=178.333, max=178.333, sum=178.333 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 5.0,
          "description": "min=5, mean=5, max=5, sum=5 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 1642.6,
          "description": "min=1642.6, mean=1642.6, max=1642.6, sum=1642.6 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 164.0,
          "description": "min=164, mean=164, max=164, sum=164 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization:subset=real_call_transcripts,model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 728.7625,
          "description": "min=728.763, mean=728.763, max=728.763, sum=728.763 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 179.03333333333333,
          "description": "min=179.033, mean=179.033, max=179.033, sum=179.033 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 240.0,
          "description": "min=240, mean=240, max=240, sum=240 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 728.7625,
          "description": "min=728.763, mean=728.763, max=728.763, sum=728.763 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=openai_gpt-4o-mini-2024-07-18"
          ]
        },
        {
          "value": 179.03333333333333,
          "description": "min=179.033, mean=179.033, max=179.033, sum=179.033 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=openai_gpt-4o-mini-2024-07-18"
          ]
        }
      ]
    ],
    "links": [
      {
        "text": "LaTeX",
        "href": "benchmark_output/releases/v0.1.0/groups/latex/call_center_scenarios_general_information.tex"
      },
      {
        "text": "JSON",
        "href": "benchmark_output/releases/v0.1.0/groups/json/call_center_scenarios_general_information.json"
      }
    ],
    "name": "general_information"
  },
  {
    "title": "Pairwise Comparison",
    "header": [
      {
        "value": "Model/adapter",
        "markdown": false,
        "metadata": {}
      },
      {
        "value": "Summarization (Pairwise) - Pairwise",
        "description": "summarization\n\nPairwise: Whether the model's summary was preferred by the evaluator model",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Pairwise",
          "run_group": "Summarization (Pairwise)"
        }
      }
    ],
    "rows": [
      [
        {
          "value": "Llama 3 Instruct (70B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.9333333333333333,
          "description": "min=0.933, mean=0.933, max=0.933, sum=0.933 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-70b-chat"
          ]
        }
      ],
      [
        {
          "value": "Llama 3 Instruct (8B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.9708333333333333,
          "description": "min=0.971, mean=0.971, max=0.971, sum=0.971 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=meta_llama-3-8b-chat"
          ]
        }
      ],
      [
        {
          "value": "Claude 3.5 Sonnet (20240620)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.8958333333333334,
          "description": "min=0.896, mean=0.896, max=0.896, sum=0.896 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        }
      ],
      [
        {
          "value": "GPT-4o (2024-05-13)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.6375,
          "description": "min=0.637, mean=0.637, max=0.637, sum=0.637 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-2024-05-13"
          ]
        }
      ],
      [
        {
          "value": "GPT-4o mini (2024-07-18)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.4083333333333333,
          "description": "min=0.408, mean=0.408, max=0.408, sum=0.408 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_pairwise_comparison:model=openai_gpt-4o-mini-2024-07-18"
          ]
        }
      ]
    ],
    "links": [
      {
        "text": "LaTeX",
        "href": "benchmark_output/releases/v0.1.0/groups/latex/call_center_scenarios_pairwise_comparison_metrics.tex"
      },
      {
        "text": "JSON",
        "href": "benchmark_output/releases/v0.1.0/groups/json/call_center_scenarios_pairwise_comparison_metrics.json"
      }
    ],
    "name": "pairwise_comparison_metrics"
  },
  {
    "title": "Key Points Recall",
    "header": [
      {
        "value": "Model/adapter",
        "markdown": false,
        "metadata": {}
      },
      {
        "value": "Summarization (Key Points Recall) - Recall",
        "description": "summarization\n\nRecall: How many key items were recalled",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "Recall",
          "run_group": "Summarization (Key Points Recall)"
        }
      }
    ],
    "rows": [
      [
        {
          "value": "Llama 3 Instruct (70B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.8087500000000011,
          "description": "min=0.809, mean=0.809, max=0.809, sum=0.809 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-70b-chat"
          ]
        }
      ],
      [
        {
          "value": "Llama 3 Instruct (8B)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.784583333333334,
          "description": "min=0.785, mean=0.785, max=0.785, sum=0.785 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=meta_llama-3-8b-chat"
          ]
        }
      ],
      [
        {
          "value": "Claude 3.5 Sonnet (20240620)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.8287500000000013,
          "description": "min=0.829, mean=0.829, max=0.829, sum=0.829 (1)",
          "style": {},
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=anthropic_claude-3-5-sonnet-20240620"
          ]
        }
      ],
      [
        {
          "value": "GPT-4o (2024-05-13)",
          "description": "",
          "markdown": false
        },
        {
          "description": "No matching runs",
          "markdown": false
        }
      ],
      [
        {
          "value": "GPT-4o mini (2024-07-18)",
          "description": "",
          "markdown": false
        },
        {
          "value": 0.9044907407407423,
          "description": "min=0.904, mean=0.904, max=0.904, sum=0.904 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false,
          "run_spec_names": [
            "call_center_summarization_key_points_recall:model=openai_gpt-4o-mini-2024-07-18"
          ]
        }
      ]
    ],
    "links": [
      {
        "text": "LaTeX",
        "href": "benchmark_output/releases/v0.1.0/groups/latex/call_center_scenarios_key_points_recall_metrics.tex"
      },
      {
        "text": "JSON",
        "href": "benchmark_output/releases/v0.1.0/groups/json/call_center_scenarios_key_points_recall_metrics.json"
      }
    ],
    "name": "key_points_recall_metrics"
  }
]