Selaa lähdekoodia

new_add_linkselfie__update

Shun Yamachika 2 kuukautta sitten
vanhempi
commit
c89d77ec67
100 muutettua tiedostoa jossa 1960 lisäystä ja 5621 poistoa
  1. 0 25
      add_linkselfie/#pickele#
  2. 0 1
      add_linkselfie/.#pickele
  3. 0 4
      add_linkselfie/.gitignore
  4. 0 100
      add_linkselfie/MEMO.org
  5. 0 99
      add_linkselfie/MEMO.org~
  6. BIN
      add_linkselfie/__pycache__/evaluation.cpython-38.pyc
  7. 0 20
      add_linkselfie/algorithms/__init__.py
  8. BIN
      add_linkselfie/algorithms/__pycache__/__init__.cpython-38.pyc
  9. BIN
      add_linkselfie/algorithms/__pycache__/lonline_nb.cpython-38.pyc
  10. BIN
      add_linkselfie/algorithms/__pycache__/naive_nb.cpython-38.pyc
  11. BIN
      add_linkselfie/algorithms/__pycache__/online_nb.cpython-38.pyc
  12. BIN
      add_linkselfie/algorithms/__pycache__/succ_elim_nb.cpython-38.pyc
  13. 0 40
      add_linkselfie/algorithms/lnaive_nb.py
  14. 0 20
      add_linkselfie/algorithms/lnaive_nb.py~
  15. 0 68
      add_linkselfie/algorithms/lonline_nb.py
  16. 0 45
      add_linkselfie/algorithms/lonline_nb.py~
  17. 0 27
      add_linkselfie/algorithms/memo.txt
  18. 0 5
      add_linkselfie/algorithms/memo.txt~
  19. 0 20
      add_linkselfie/algorithms/naive_nb.py
  20. 0 51
      add_linkselfie/algorithms/online_nb.py
  21. 0 60
      add_linkselfie/algorithms/succ_elim_nb.py
  22. 0 20
      add_linkselfie/dump.txt
  23. 0 1
      add_linkselfie/evalationmemo.txt~
  24. 0 575
      add_linkselfie/evaluation.py
  25. 0 852
      add_linkselfie/evaluationold.py
  26. 0 24
      add_linkselfie/fidelity.py
  27. 0 105
      add_linkselfie/main.py
  28. 0 145
      add_linkselfie/mainold.py
  29. 0 154
      add_linkselfie/memo.org
  30. 0 37
      add_linkselfie/memo.org~
  31. 0 158
      add_linkselfie/memo.txt
  32. 0 108
      add_linkselfie/memo.txt~
  33. BIN
      add_linkselfie/outputs/plot_accuracy_vs_budget_Depolar.pickle
  34. BIN
      add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean.pickle
  35. 0 361
      add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean_gaps.csv
  36. 0 361
      add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean_widths.csv
  37. BIN
      add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean.pickle
  38. 0 361
      add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean_gaps.csv
  39. 0 361
      add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean_widths.csv
  40. BIN
      add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_max.pickle
  41. 0 361
      add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_max_widths.csv
  42. BIN
      add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_mean.pickle
  43. 0 361
      add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_mean_widths.csv
  44. BIN
      add_linkselfie/outputs/plot_minwidthsum_perpair_vs_budget_Depolar.pickle
  45. BIN
      add_linkselfie/outputs/plot_minwidthsum_perpair_weighted_vs_budget_Depolar.pickle
  46. BIN
      add_linkselfie/outputs/plot_widthsum_alllinks_vs_budget_Depolar.pickle
  47. BIN
      add_linkselfie/outputs/plot_widthsum_alllinks_weighted_vs_budget_Depolar.pickle
  48. 0 74
      add_linkselfie/piclecsv.py
  49. 0 29
      add_linkselfie/piclecsv.py~
  50. BIN
      add_linkselfie/plot_accuracy_vs_budget_Depolar.pdf
  51. BIN
      add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean.pdf
  52. BIN
      add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean_gap.pdf
  53. BIN
      add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean.pdf
  54. BIN
      add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean_gap.pdf
  55. BIN
      add_linkselfie/plot_ciwidth_vs_budget_Depolar_max.pdf
  56. BIN
      add_linkselfie/plot_ciwidth_vs_budget_Depolar_mean.pdf
  57. BIN
      add_linkselfie/plot_minwidthsum_perpair_vs_budget_Depolar.pdf
  58. BIN
      add_linkselfie/plot_minwidthsum_perpair_weighted_vs_budget_Depolar.pdf
  59. BIN
      add_linkselfie/plot_value_vs_budget_Depolar.pdf
  60. BIN
      add_linkselfie/plot_value_vs_budget_target_Depolar.pdf
  61. BIN
      add_linkselfie/plot_value_vs_used_Depolar.pdf
  62. BIN
      add_linkselfie/plot_weighted_errorrate_vs_budget_Depolar.pdf
  63. BIN
      add_linkselfie/plot_widthsum_alllinks_vs_budget_Depolar.pdf
  64. BIN
      add_linkselfie/plot_widthsum_alllinks_weighted_vs_budget_Depolar.pdf
  65. BIN
      add_linkselfie/schedulers/__pycache__/__init__.cpython-38.pyc
  66. BIN
      add_linkselfie/schedulers/__pycache__/greedy_scheduler.cpython-38.pyc
  67. BIN
      add_linkselfie/schedulers/__pycache__/lonline_nb.cpython-38.pyc
  68. BIN
      add_linkselfie/schedulers/__pycache__/pac_greedy_scheduler.cpython-38.pyc
  69. BIN
      add_linkselfie/schedulers/__pycache__/pac_naive_scheduler.cpython-38.pyc
  70. BIN
      add_linkselfie/schedulers/__pycache__/pac_wrapper.cpython-38.pyc
  71. 0 95
      add_linkselfie/schedulers/greedy_scheduler.py
  72. 0 113
      add_linkselfie/schedulers/lonline_nb.py
  73. 0 35
      add_linkselfie/schedulers/memo.txt
  74. 0 11
      add_linkselfie/schedulers/memo.txt~
  75. 0 210
      add_linkselfie/simulation.py
  76. 0 124
      add_linkselfie/simulation.py~
  77. 0 0
      new_add_linkselfie/LICENSE
  78. 0 0
      new_add_linkselfie/README.md
  79. BIN
      new_add_linkselfie/__pycache__/evaluation.cpython-38.pyc
  80. BIN
      new_add_linkselfie/__pycache__/evaluationgap.cpython-38.pyc
  81. BIN
      new_add_linkselfie/__pycache__/evaluationpair.cpython-38.pyc
  82. BIN
      new_add_linkselfie/__pycache__/nb_protocol.cpython-38.pyc
  83. BIN
      new_add_linkselfie/__pycache__/network.cpython-38.pyc
  84. BIN
      new_add_linkselfie/__pycache__/utils.cpython-38.pyc
  85. 69 0
      new_add_linkselfie/convert.py
  86. 37 0
      new_add_linkselfie/convert.py~
  87. 27 0
      new_add_linkselfie/evalationmemo.txt
  88. 515 0
      new_add_linkselfie/evaluation.py
  89. 420 0
      new_add_linkselfie/evaluationgap.py
  90. 357 0
      new_add_linkselfie/evaluationgap.py~
  91. 339 0
      new_add_linkselfie/evaluationpair.py
  92. 20 0
      new_add_linkselfie/groups.org
  93. 176 0
      new_add_linkselfie/main.py
  94. BIN
      new_add_linkselfie/metrics/__pycache__/widths.cpython-38.pyc
  95. 0 0
      new_add_linkselfie/metrics/widths.py
  96. 0 0
      new_add_linkselfie/metrics/widths.py~
  97. 0 0
      new_add_linkselfie/nb_protocol.py
  98. 0 0
      new_add_linkselfie/network.py
  99. BIN
      new_add_linkselfie/outputs/plot_accuracy_vs_budget_Depolar.pdf
  100. BIN
      new_add_linkselfie/outputs/plot_value_vs_budget_Depolar.pdf

+ 0 - 25
add_linkselfie/#pickele#

@@ -1,25 +0,0 @@
-python - <<'PY'
-import sys, json, pickle
-from pathlib import Path
-import numpy as np
-
-def to_jsonable(x):
-    if isinstance(x, dict):
-        return {str(k): to_jsonable(v) for k,v in x.items()}
-    if isinstance(x, (list, tuple, set)):
-        return [to_jsonable(v) for v in x]
-    if isinstance(x, (np.integer,)): return int(x)
-    if isinstance(x, (np.floating,)): return float(x)
-    if isinstance(x, (np.ndarray,)): return x.tolist()
-    try:
-        json.dumps(x); return x
-    except Exception:
-        return str(x)
-
-path = Path(sys.argv[1])
-with open(path,"rb") as f:
-    obj = pickle.load(f)
-out = path.with_suffix(".json")
-out.write_text(json.dumps(to_jsonable(obj), ensure_ascii=False, indent=2))
-print("Wrote:", out)
-PY outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean.pickle

+ 0 - 1
add_linkselfie/.#pickele

@@ -1 +0,0 @@
-shun@a.1236:1757736525

+ 0 - 4
add_linkselfie/.gitignore

@@ -1,4 +0,0 @@
-__pycache__/
-*.pdf
-outputs/
-.DS_Store

+ 0 - 100
add_linkselfie/MEMO.org

@@ -1,100 +0,0 @@
-# add_linkselfie – I/O & Loops memo
-
-## main.py
-**Inputs**
-- 実験パラメータ: `budget_list`, `scheduler_names`, `noise_model_names`, `node_path_list`, `importance_list`, `bounces`, `repeat`
-
-**Outputs**
-- `evaluation.py` の各関数を並列実行 → `outputs/*.pdf` を生成
-
-**Loops**
-- `for noise_model in noise_model_names:`
-  - `Pool.apply_async(...)` で
-    - `plot_accuracy_vs_budget(...)`
-    - `plot_value_vs_used(...)`
-    - `plot_value_vs_budget_target(...)`
-  - 最後に `join()` して回収
-
----
-
-## evaluation.py
-### plot_accuracy_vs_budget(...)
-**Inputs**: 予算列・スケジューラ名・ノイズモデル・ノード/重要度・`bounces`・`repeat`  
-**Outputs**: `outputs/plot_accuracy_vs_budget_{noise_model}.pdf`  
-**Loops**:
-- `for C_total in budget_list:`  
-  - `for r in range(repeat):`
-    - トポロジ生成(ペアごとに fidelity リスト)
-    - `for name in scheduler_names:` → `run_scheduler(...)`
-      - 正答率を集計(平均)
-
-### plot_value_vs_used(...)
-**Inputs**: 同上(`return_details=True` で呼ぶ)  
-**Outputs**: `outputs/plot_value_vs_used_{noise_model}.pdf`(x=実消費コスト平均)  
-**Loops**:
-- `for C_total in budget_list:`
-  - `for r in range(repeat):`
-    - トポロジ生成
-    - `for name in scheduler_names:` → `run_scheduler(..., return_details=True)`
-      - 受け取った `alloc_by_path` と `est_fid_by_path` で価値を合成
-      - `total_cost` を保持
-
-### plot_value_vs_budget_target(...)
-**Inputs**: 同上(`return_details=True` で呼ぶ)  
-**Outputs**: `outputs/plot_value_vs_budget_target_{noise_model}.pdf`(x=目標予算)  
-**Loops**: `plot_value_vs_used` と同一(プロット時の x だけ `budget_list`)
-
----
-
-## schedulers/__init__.py
-**Inputs**: `node_path_list`, `importance_list`, `scheduler_name`, `bounces`, `C_total`, `network_generator`, `return_details`  
-**Outputs**:
-- `"LNaive"` → `lnaive_budget_scheduler(...)` の戻り値をそのまま返す  
-- `"Greedy"` → `greedy_budget_scheduler(...)` の戻り値をそのまま返す  
-**Loops**: なし(ディスパッチのみ)
-
----
-
-## schedulers/lnaive_scheduler.py
-**Inputs**: `node_path_list`, `importance_list`, `bounces`, `C_total`, `network_generator`, `return_details=False`  
-**Outputs**:
-- `per_pair_results: List[(correct: bool, cost: int, best_fid: float|None)]`
-- `total_cost: int`
-- `per_pair_details: List[{alloc_by_path, est_fid_by_path}]`(`return_details=True` のとき)
-**Loops**:
-- `for pair_idx, path_num in enumerate(node_path_list):`
-  - 各ペアに等分配した予算で `naive_network_benchmarking_with_budget(...)` を1回呼ぶ
-
----
-
-## schedulers/greedy_scheduler.py
-**Inputs**: `node_path_list`, `importance_list`, `bounces`, `C_total`, `network_generator`, `C_initial_per_pair=40`, `return_details=False`  
-**Outputs**: 上記 LNaive と同形(初期+残余の詳細はマージ)  
-**Loops**:
-1) **初期プローブ**: `for pair_idx, path_num in enumerate(node_path_list):`
-   - 小予算で `lonline_network_benchmarking(...)` を実行
-2) **優先度計算**: `score = importance * estimated_fidelity` を全ペアで算出し降順ソート
-3) **残余配分**: `for pair_idx in sorted_indices:` 残り予算を順に集中投資(各回 `lonline_network_benchmarking(...)`)
-
----
-
-## schedulers/lonline_nb.py (アルゴ:L-Online)
-**Inputs**: `network`, `path_list`, `bounces`, `C_budget`, `return_details=False`  
-**Outputs**:
-- `return_details=False`: `(correct: bool, cost: int, best_fid: float|None)`
-- `return_details=True` : 上に加え `alloc_by_path: {path_id:int}`, `est_fid_by_path: {path_id:float}`
-**Loops**:
-- `while cost < C_budget and len(candidate_set) > 1:`
-  - ラウンド `s` のサンプル数 `Ns` を決定
-  - `for path in candidate_set:`(予算が入る分だけ計測)
-  - 連続削除ルールで候補を間引き
-
----
-
-## schedulers/lnaive_nb.py (アルゴ:L-Naive)
-**Inputs**: `network`, `path_list`, `bounces`, `C_budget`, `return_details=False`  
-**Outputs**: L-Online と同形(3タプル/5タプル)  
-**Loops**:
-- `Ns = floor(C_budget / (len(path_list) * per_sample_cost))` を計算
-- `for path in path_list:` 各経路を同じ回数 `Ns` 測定して推定更新
-

+ 0 - 99
add_linkselfie/MEMO.org~

@@ -1,99 +0,0 @@
-# add_linkselfie – I/O & Loops memo
-
-## main.py
-**Inputs**
-- 実験パラメータ: `budget_list`, `scheduler_names`, `noise_model_names`, `node_path_list`, `importance_list`, `bounces`, `repeat`
-
-**Outputs**
-- `evaluation.py` の各関数を並列実行 → `outputs/*.pdf` を生成
-
-**Loops**
-- `for noise_model in noise_model_names:`
-  - `Pool.apply_async(...)` で
-    - `plot_accuracy_vs_budget(...)`
-    - `plot_value_vs_used(...)`
-    - `plot_value_vs_budget_target(...)`
-  - 最後に `join()` して回収
-
----
-
-## evaluation.py
-### plot_accuracy_vs_budget(...)
-**Inputs**: 予算列・スケジューラ名・ノイズモデル・ノード/重要度・`bounces`・`repeat`  
-**Outputs**: `outputs/plot_accuracy_vs_budget_{noise_model}.pdf`  
-**Loops**:
-- `for C_total in budget_list:`  
-  - `for r in range(repeat):`
-    - トポロジ生成(ペアごとに fidelity リスト)
-    - `for name in scheduler_names:` → `run_scheduler(...)`
-      - 正答率を集計(平均)
-
-### plot_value_vs_used(...)
-**Inputs**: 同上(`return_details=True` で呼ぶ)  
-**Outputs**: `outputs/plot_value_vs_used_{noise_model}.pdf`(x=実消費コスト平均)  
-**Loops**:
-- `for C_total in budget_list:`
-  - `for r in range(repeat):`
-    - トポロジ生成
-    - `for name in scheduler_names:` → `run_scheduler(..., return_details=True)`
-      - 受け取った `alloc_by_path` と `est_fid_by_path` で価値を合成
-      - `total_cost` を保持
-
-### plot_value_vs_budget_target(...)
-**Inputs**: 同上(`return_details=True` で呼ぶ)  
-**Outputs**: `outputs/plot_value_vs_budget_target_{noise_model}.pdf`(x=目標予算)  
-**Loops**: `plot_value_vs_used` と同一(プロット時の x だけ `budget_list`)
-
----
-
-## schedulers/__init__.py
-**Inputs**: `node_path_list`, `importance_list`, `scheduler_name`, `bounces`, `C_total`, `network_generator`, `return_details`  
-**Outputs**:
-- `"LNaive"` → `lnaive_budget_scheduler(...)` の戻り値をそのまま返す  
-- `"Greedy"` → `greedy_budget_scheduler(...)` の戻り値をそのまま返す  
-**Loops**: なし(ディスパッチのみ)
-
----
-
-## schedulers/lnaive_scheduler.py
-**Inputs**: `node_path_list`, `importance_list`, `bounces`, `C_total`, `network_generator`, `return_details=False`  
-**Outputs**:
-- `per_pair_results: List[(correct: bool, cost: int, best_fid: float|None)]`
-- `total_cost: int`
-- `per_pair_details: List[{alloc_by_path, est_fid_by_path}]`(`return_details=True` のとき)
-**Loops**:
-- `for pair_idx, path_num in enumerate(node_path_list):`
-  - 各ペアに等分配した予算で `naive_network_benchmarking_with_budget(...)` を1回呼ぶ
-
----
-
-## schedulers/greedy_scheduler.py
-**Inputs**: `node_path_list`, `importance_list`, `bounces`, `C_total`, `network_generator`, `C_initial_per_pair=40`, `return_details=False`  
-**Outputs**: 上記 LNaive と同形(初期+残余の詳細はマージ)  
-**Loops**:
-1) **初期プローブ**: `for pair_idx, path_num in enumerate(node_path_list):`
-   - 小予算で `lonline_network_benchmarking(...)` を実行
-2) **優先度計算**: `score = importance * estimated_fidelity` を全ペアで算出し降順ソート
-3) **残余配分**: `for pair_idx in sorted_indices:` 残り予算を順に集中投資(各回 `lonline_network_benchmarking(...)`)
-
----
-
-## schedulers/lonline_nb.py (アルゴ:L-Online)
-**Inputs**: `network`, `path_list`, `bounces`, `C_budget`, `return_details=False`  
-**Outputs**:
-- `return_details=False`: `(correct: bool, cost: int, best_fid: float|None)`
-- `return_details=True` : 上に加え `alloc_by_path: {path_id:int}`, `est_fid_by_path: {path_id:float}`
-**Loops**:
-- `while cost < C_budget and len(candidate_set) > 1:`
-  - ラウンド `s` のサンプル数 `Ns` を決定
-  - `for path in candidate_set:`(予算が入る分だけ計測)
-  - 連続削除ルールで候補を間引き
-
----
-
-## schedulers/lnaive_nb.py (アルゴ:L-Naive)
-**Inputs**: `network`, `path_list`, `bounces`, `C_budget`, `return_details=False`  
-**Outputs**: L-Online と同形(3タプル/5タプル)  
-**Loops**:
-- `Ns = floor(C_budget / (len(path_list) * per_sample_cost))` を計算
-- `for path in path_list:` 各経路を同じ回数 `Ns` 測定して推定更新

BIN
add_linkselfie/__pycache__/evaluation.cpython-38.pyc


+ 0 - 20
add_linkselfie/algorithms/__init__.py

@@ -1,20 +0,0 @@
-from .naive_nb import naive_network_benchmarking  # noqa: F401
-from .online_nb import online_network_benchmarking  # noqa: F401
-from .lonline_nb import lonline_network_benchmarking  # noqa: F401
-from .succ_elim_nb import \
-    successive_elimination_network_benchmarking  # noqa: F401
-
-
-def benchmark_using_algorithm(network, path_list, algorithm_name, bounces, sample_times,C_budget=None):
-    if algorithm_name == "Vanilla NB":
-        return naive_network_benchmarking(network, path_list, bounces, sample_times)
-    elif algorithm_name == "LinkSelFiE":
-        return online_network_benchmarking(network, path_list, bounces)
-    elif algorithm_name == "LimitLinkSelFiE":
-        return lonline_network_benchmarking(network, path_list, bounces, C_budget)
-
-    elif algorithm_name == "Succ. Elim. NB":
-        return successive_elimination_network_benchmarking(network, path_list, bounces)
-    else:
-        print("Error: Unknown algorithm name")
-        exit(1)

BIN
add_linkselfie/algorithms/__pycache__/__init__.cpython-38.pyc


BIN
add_linkselfie/algorithms/__pycache__/lonline_nb.cpython-38.pyc


BIN
add_linkselfie/algorithms/__pycache__/naive_nb.cpython-38.pyc


BIN
add_linkselfie/algorithms/__pycache__/online_nb.cpython-38.pyc


BIN
add_linkselfie/algorithms/__pycache__/succ_elim_nb.cpython-38.pyc


+ 0 - 40
add_linkselfie/algorithms/lnaive_nb.py

@@ -1,40 +0,0 @@
-# lnaive_nb.py
-def naive_network_benchmarking_with_budget(network, path_list, bounces, C_budget):
-    """
-    目的:
-      総予算 C_budget を各パスへ均等割り当てし、均等サンプリングで NB を実行。
-      実行コストは常に予算内(超過しない)。
-
-    出力:
-      (correctness, cost, best_path_fidelity)
-        correctness … 推定最良パスが真の最良と一致したか
-        cost        … 実測で消費した総コスト
-        best_path_fidelity … 推定最良パスの推定忠実度(naive変換後)
-    """
-    fidelity, cost = {}, 0
-    n_paths = len(path_list)
-    if n_paths == 0:
-        return False, 0, None
-
-    per_sample_cost = sum(bounces) or 1
-    per_path_budget = int(C_budget) // n_paths
-    Ns = per_path_budget // per_sample_cost  # 各パスのサンプル数
-    if Ns <= 0:
-        return False, 0, None
-
-    # 各 hop に同じ Ns を配る(既存 naive と同じ割当表)
-    sample_times = {h: int(Ns) for h in bounces}
-
-    # 各パスを均等回数でベンチマーク
-    for path in path_list:
-        p, used = network.benchmark_path(path, bounces, sample_times)
-        fidelity[path] = p + (1 - p) / 2  # 既存 naive と同じ忠実度変換
-        cost += used
-
-    if not fidelity:
-        return False, cost, None
-
-    best_path = max(fidelity, key=fidelity.get)
-    correctness = (best_path == getattr(network, "best_path", None))
-    best_path_fidelity = fidelity[best_path]
-    return correctness, cost, best_path_fidelity

+ 0 - 20
add_linkselfie/algorithms/lnaive_nb.py~

@@ -1,20 +0,0 @@
-def naive_network_benchmarking(network, path_list, bounces, sample_times):
-    fidelity = {}
-    cost = 0
-
-    # ガード: サンプル0なら何もしない
-    if sample_times and not any(sample_times.values()):
-        return 0, 0, None
-
-    for path in path_list:
-        p, bounces_num = network.benchmark_path(path, bounces, sample_times)
-        fidelity[path] = p + (1 - p) / 2
-        cost += bounces_num
-
-    if not fidelity:               # ガード: path_list空など
-        return 0, cost, None
-
-    best_path = max(fidelity, key=fidelity.get)
-    correctness = (best_path == network.best_path)
-    best_path_fidelity = fidelity[best_path]
-    return correctness, cost, best_path_fidelity

+ 0 - 68
add_linkselfie/algorithms/lonline_nb.py

@@ -1,68 +0,0 @@
-import math
-
-def lonline_network_benchmarking(network, path_list, bounces, C_budget):
-    candidate_set = list(path_list)
-    s = 0
-    C = 0.01     
-    delta = 0.1  
-    cost = 0
-    estimated_fidelities = {}
-
-    # 1経路を1サンプル測る想定コスト(list 前提)
-    cost_per_sample_unit = sum(bounces) if sum(bounces) > 0 else 1
-
-    while cost < C_budget and len(candidate_set) > 1:
-        s += 1
-        Ns = math.ceil(C * (2 ** (2 * s)) * math.log2(max((2 ** s) * len(candidate_set) / delta, 2)))
-        if Ns < 4:
-            Ns = 4
-
-        # --- 事前コスト見積り(lonline準拠) ---
-        cost_needed_for_one_path = Ns * cost_per_sample_unit
-        # 2ラウンド目以降で1経路ぶんすら入らないなら終了
-        if cost + cost_needed_for_one_path > C_budget and s > 1:
-            break
-        # 1ラウンド目だけは Ns を縮退してでも1回は回す(入らなければ中止)
-        """
-        if cost + cost_needed_for_one_path > C_budget and s == 1:
-            Ns_fit = (C_budget - cost) // max(cost_per_sample_unit, 1)
-            if Ns_fit <= 0:
-                break
-            Ns = int(Ns_fit)
-            cost_needed_for_one_path = Ns * cost_per_sample_unit
-        """
-        # ---------------------------------------
-
-        sample_times = {i: Ns for i in bounces}
-
-        p_s = {}
-        measured_paths = []  # このラウンドで実際に測れた経路だけを削除判定に使う
-        for path in list(candidate_set):
-            if cost + cost_needed_for_one_path > C_budget:
-                continue  # 予算に入らない経路はこのラウンドはスキップ
-            p, bounces_num = network.benchmark_path(path, bounces, sample_times)
-            cost += bounces_num
-            estimated_fidelities[path] = p + (1 - p) / 2  # ★ 既存式&変数名を踏襲
-            p_s[path] = p
-            measured_paths.append(path)
-
-        if not p_s:
-            break  # このラウンドで1つも測れなかった
-
-        # online_nb.py と同じ 2^{-s} 幅の連続削除
-        p_max = max(p_s.values())
-        new_candidate_set = []
-        for path in measured_paths:  # 測れたものだけで判定(KeyError防止)
-            if p_s[path] + 2**(-s) > p_max - 2**(-s):
-                new_candidate_set.append(path)
-
-        # もし全消しになったら、保険として現集合を維持
-        candidate_set = new_candidate_set or candidate_set
-
-    if not estimated_fidelities:
-        return None, cost, None
-
-    best_path = max(estimated_fidelities, key=estimated_fidelities.get)
-    best_path_fidelity = estimated_fidelities[best_path]
-    correctness = (best_path == getattr(network, "best_path", None))
-    return correctness, cost, best_path_fidelity

+ 0 - 45
add_linkselfie/algorithms/lonline_nb.py~

@@ -1,45 +0,0 @@
-def _online_network_benchmarking_truly_strict(network, path_list, bounces, C_budget):
-    candidate_set = list(path_list)
-    s = 0
-    C_const = 0.01
-    delta = 0.1
-    cost = 0
-    estimated_fidelities = {}
-    
-
-    cost_per_sample_unit = sum(bounces)
-    if cost_per_sample_unit == 0:
-        cost_per_sample_unit = 1
-
-    while cost < C_budget and len(candidate_set) > 1:
-        s += 1
-        Ns = math.ceil(C_const * 2**(2 * s) * math.log2(2**s * len(candidate_set) / delta))
-        if Ns < 4:
-            Ns = 4
-        
-        cost_needed_for_one_path = Ns * cost_per_sample_unit
-        if cost + cost_needed_for_one_path > C_budget and s > 1:
-             break
-
-        p_s = {}
-        sample_times = {i: Ns for i in bounces}
-        for path in candidate_set:
-            if cost + cost_needed_for_one_path > C_budget:
-                continue
-            p, bounces_num = network.benchmark_path(path, bounces, sample_times)
-            cost += bounces_num
-            estimated_fidelity = p + (1 - p) / 2
-            estimated_fidelities[path] = estimated_fidelity
-            p_s[path] = p
-        if not p_s:
-            break
-        p_max = max(p_s.values())
-        new_candidate_set = [path for path in candidate_set if path in p_s and p_s[path] + 2**(-s) > p_max - 2**(-s)]
-        candidate_set = new_candidate_set
-        
-    if not estimated_fidelities:
-
-    best_path = max(estimated_fidelities, key=estimated_fidelities.get)
-    best_path_fidelity = estimated_fidelities[best_path]
-    correctness = (best_path == network.best_path)
-

+ 0 - 27
add_linkselfie/algorithms/memo.txt

@@ -1,27 +0,0 @@
-入力
-network(オブジェクト)
-
-ここは“実験器”役。内部で乱数やノイズモデルを使ってOK
-
-path_list(list)
-
-測定対象のパスの集合(例:[1, 2, 3, 4] やタプルの列など)
-
-bounces(list)
-
-1回の経路ベンチマークで各 hop が消費する重み(コスト)
-例:[1, 2, 3, 4] → 1サンプルの概算コストは sum(bounces)=10
-
-C_budget(int)
-
-総測定予算(絶対に超えない)
-
-
-
-
-出力
-correctness: 選んだパスが真の最良パスかどうか(True/False)
-
-cost: 使った総コスト
-
-best_path_fidelity: 選んだパスの推定忠実度

+ 0 - 5
add_linkselfie/algorithms/memo.txt~

@@ -1,5 +0,0 @@
-correctness: 選んだパスが真の最良パスかどうか(True/False)
-
-cost: 使った総コスト
-
-best_path_fidelity: 選んだパスの推定忠実度

+ 0 - 20
add_linkselfie/algorithms/naive_nb.py

@@ -1,20 +0,0 @@
-import numpy as np
-
-
-def naive_network_benchmarking(network, path_list, bounces, sample_times):
-    '''Perform vanilla network benchmarking for each path in the `path_list`.
-    Return a tuple: (a list of fidelities, the total number of bounces).
-    '''
-    fidelity = {}
-    cost = 0
-    for path in path_list:
-        p, bounces_num = network.benchmark_path(path, bounces, sample_times)
-        # fidelity.append(result)
-        fidelity[path] = p + (1 - p) / 2  # Convert the estimated depolarizing parameter `p` into fidelity
-        cost += bounces_num
-    # print("Estimated fidelity:", fidelity)
-    # best_path = np.argmax(fidelity) + 1
-    best_path = max(fidelity, key=fidelity.get)
-    correctness = best_path == network.best_path
-    best_path_fidelity = fidelity[best_path]
-    return correctness, cost, best_path_fidelity

+ 0 - 51
add_linkselfie/algorithms/online_nb.py

@@ -1,51 +0,0 @@
-import math
-
-
-def online_network_benchmarking(network, path_list, bounces):
-    # Initialization
-    candidate_set = path_list
-    s = 0  # Phase
-    C = 0.01  # Constant
-    delta = 0.1  # Error
-    cost = 0
-    estimated_fidelities = {}
-    # error_probability = {}  # Map bounces number to the probability that the arm with largest mean is not the best arm
-    # epoch_len = 20
-    # epoch = 0
-    # correct_times = 0
-    while len(candidate_set) > 1:
-        s += 1
-        Ns = math.ceil(C * 2**(2 * s) * math.log2(2**s * len(candidate_set) / delta))
-        if Ns < 4:
-            Ns = 4
-        # print(f"Ns: {Ns}")
-        sample_times = {}
-        for i in bounces:
-            sample_times[i] = Ns
-
-        p_s = {}
-        for path in candidate_set:
-            p, bounces_num = network.benchmark_path(path, bounces, sample_times)
-            # print(f"Estimated Fidelity of path {path}: {p}")
-            # Convert the estimated depolarizing parameter `p` into fidelity
-            estimated_fidelities[path] = p + (1 - p) / 2
-            p_s[path] = p
-            cost += bounces_num
-        p_max = max(p_s.values())
-        # current_best_path = max(p_s, key=p_s.get)
-        # if current_best_path == network.best_path:
-        #     correct_times += 1
-        new_candidate_set = []
-        for path in candidate_set:
-            # print(f"p_s[path] + 2**(-s): {p_s[path] + 2**(-s)}")
-            # print(f"p_max - 2**(-s): {p_max - 2**(-s)}")
-            if p_s[path] + 2**(-s) > p_max - 2**(-s):
-                new_candidate_set.append(path)
-        candidate_set = new_candidate_set
-
-    assert len(candidate_set) == 1
-    best_path = candidate_set[0]
-    correctness = best_path == network.best_path
-    best_path_fidelity = estimated_fidelities[best_path]
-    # print(f"Best path: {best_path}, estimated parameter p: {p_s[best_path]}, cost: {cost}")
-    return correctness, cost, best_path_fidelity

+ 0 - 60
add_linkselfie/algorithms/succ_elim_nb.py

@@ -1,60 +0,0 @@
-import math
-
-
-def successive_elimination_network_benchmarking(network, path_list, bounces):
-    # Initialization
-    L = len(path_list)
-    active_set = path_list
-    C = 0.15
-    N = 4
-    cost = 0
-    delta = 0.1
-    sample_times = {}
-    for i in bounces:
-        sample_times[i] = N
-
-    mean = {path: 0 for path in path_list}
-    n = {path: 0 for path in path_list}
-    t = 0
-    while len(active_set) > 1:
-        t += 1
-        ucb = {}
-        lcb = {}
-        for path in active_set:
-            p, bounces_num = network.benchmark_path(path, bounces, sample_times)
-            if p > 1.5:
-                print(f"Get an abnormal p={p}")
-            cost += bounces_num
-            mean[path] = (mean[path] * n[path] + p) / (n[path] + 1)
-            n[path] += 1
-            r = C * math.sqrt(math.log(4 * L * t * t / delta) / (2 * t))
-            # print(f"r={r}, {math.log(4 * L * t * t / delta)}")
-            ucb[path] = mean[path] + r
-            lcb[path] = mean[path] - r
-            # print(f"mean[{path}] = {mean[path]}")
-            # print(f"ucb[{path}] = {ucb[path]}")
-            # print(f"lcb[{path}] = {lcb[path]}")
-        new_active_set = []
-        for path1 in active_set:
-            ok = True
-            for path2 in active_set:
-                if path1 != path2 and ucb[path1] < lcb[path2]:
-                    ok = False
-                    break
-            if ok:
-                new_active_set.append(path1)
-
-        active_set = new_active_set
-        # print(f"Length of active set: {len(active_set)}")
-
-    assert len(active_set) == 1
-    best_path = active_set[0]
-    correctness = best_path == network.best_path
-    # print(f"Succ Elim NB: Best path: {best_path}, estimated parameter p: {mean[best_path]}, cost: {cost}")
-    estimated_fidelity = {}
-    for path in path_list:
-        p = mean[path]
-        # Convert the estimated depolarizing parameter `p` into fidelity
-        estimated_fidelity[path] = p + (1 - p) / 2
-    best_path_fidelity = estimated_fidelity[best_path]
-    return correctness, cost, best_path_fidelity

+ 0 - 20
add_linkselfie/dump.txt

@@ -1,20 +0,0 @@
-
-add_linkselfieの主要構成
-
-
-evaluation.py
-main.py
-network.py
-nb_protocol.py
-	/metrics
-		widths.py
-	/viz
-		plots.py
-	/outputs
-	
-	/schedulers
-		greedy_scheduler.py
-  		__init__.py
-		lnaive_nb.py
-		lnaive_scheduler.py
-  		lonline_nb.py

+ 0 - 1
add_linkselfie/evalationmemo.txt~

@@ -1 +0,0 @@
-a

+ 0 - 575
add_linkselfie/evaluation.py

@@ -1,575 +0,0 @@
-# evaluation.py — Run shared sweep once; all plots aggregate from cache (Py3.8-safe)
-
-import math
-import os
-import pickle
-import time
-import shutil
-import json
-import hashlib
-
-import matplotlib.pyplot as plt
-import numpy as np
-from cycler import cycler
-
-# metrics / viz を外出し(UNIX的分離)
-from metrics.widths import (
-    ci_radius_hoeffding,
-    sum_weighted_widths_all_links,
-    sum_weighted_min_widths_perpair,
-    sum_widths_all_links,
-    sum_minwidths_perpair,
-)
-from viz.plots import mean_ci95, plot_with_ci_band
-
-from network import QuantumNetwork
-from schedulers import run_scheduler  # スケジューラ呼び出し
-
-# ---- Matplotlib style(互換性重視: hex色 & 無難な記号類)----
-plt.rc("font", family="Times New Roman")
-plt.rc("font", size=20)
-default_cycler = (
-    cycler(color=["#4daf4a", "#377eb8", "#e41a1c", "#984ea3", "#ff7f00", "#a65628"])
-    + cycler(marker=["s", "v", "o", "x", "*", "+"])
-    + cycler(linestyle=[":", "--", "-", "-.", "--", ":"])
-)
-plt.rc("axes", prop_cycle=default_cycler)
-
-# =========================
-# Fidelity generators
-# =========================
-def generate_fidelity_list_avg_gap(path_num):
-    result = []
-    fidelity_max = 1
-    fidelity_min = 0.9
-    gap = (fidelity_max - fidelity_min) / path_num
-    fidelity = fidelity_max
-    for _ in range(path_num):
-        result.append(fidelity)
-        fidelity -= gap
-    assert len(result) == path_num
-    return result
-
-def generate_fidelity_list_fix_gap(path_num, gap, fidelity_max=1):
-    result = []
-    fidelity = fidelity_max
-    for _ in range(path_num):
-        result.append(fidelity)
-        fidelity -= gap
-    assert len(result) == path_num
-    return result
-
-def generate_fidelity_list_random(path_num, alpha=0.95, beta=0.85, variance=0.1):
-    """Generate `path_num` links with a guaranteed top-1 gap."""
-    while True:
-        mean = [alpha] + [beta] * (path_num - 1)
-        result = []
-        for i in range(path_num):
-            mu = mean[i]
-            # [0.8, 1.0] の範囲に入るまでサンプリング
-            while True:
-                r = np.random.normal(mu, variance)
-                if 0.8 <= r <= 1.0:
-                    break
-            result.append(r)
-        assert len(result) == path_num
-        sorted_res = sorted(result, reverse=True)
-        if sorted_res[0] - sorted_res[1] > 0.02:
-            return result
-
-# =========================
-# Progress helpers
-# =========================
-def _start_timer():
-    return {"t0": time.time(), "last": time.time()}
-
-def _tick(timer):
-    now = time.time()
-    dt_total = now - timer["t0"]
-    dt_step = now - timer["last"]
-    timer["last"] = now
-    return dt_total, dt_step
-
-def _log(msg):
-    print(msg, flush=True)
-
-# =========================
-# Shared sweep (cache) helpers with file lock
-# =========================
-def _sweep_signature(budget_list, scheduler_names, noise_model,
-                     node_path_list, importance_list, bounces, repeat):
-    payload = {
-        "budget_list": list(budget_list),
-        "scheduler_names": list(scheduler_names),
-        "noise_model": str(noise_model),
-        "node_path_list": list(node_path_list),
-        "importance_list": list(importance_list),
-        "bounces": list(bounces),
-        "repeat": int(repeat),
-        "version": 1,
-    }
-    sig = hashlib.md5(json.dumps(payload, sort_keys=True).encode("utf-8")).hexdigest()[:10]
-    return payload, sig
-
-def _shared_sweep_path(noise_model, sig):
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-    return os.path.join(outdir, f"shared_sweep_{noise_model}_{sig}.pickle")
-
-def _run_or_load_shared_sweep(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10,
-    verbose=True, print_every=1,
-):
-    config, sig = _sweep_signature(budget_list, scheduler_names, noise_model,
-                                   node_path_list, importance_list, bounces, repeat)
-    cache_path = _shared_sweep_path(noise_model, sig)
-    lock_path  = cache_path + ".lock"
-    STALE_LOCK_SECS = 6 * 60 * 60        # 6時間無更新ならロック回収
-    HEARTBEAT_EVERY = 5.0                # 生成側のロック更新間隔(秒)
-
-    # 既存キャッシュがあれば即ロード
-    if os.path.exists(cache_path):
-        if verbose: _log(f"[shared] Load cached sweep: {os.path.basename(cache_path)}")
-        with open(cache_path, "rb") as f:
-            return pickle.load(f)
-
-    # --- ロック獲得(初回生成は1プロセスのみ)---
-    got_lock = False
-    while True:
-        try:
-            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
-            os.close(fd)
-            got_lock = True
-            break
-        except FileExistsError:
-            # 他プロセスが生成中:完成を待つ(タイムアウトなし)
-            if os.path.exists(cache_path):
-                with open(cache_path, "rb") as f:
-                    return pickle.load(f)
-
-            # スタックロック検出:長時間 mtime 更新がない場合は回収
-            try:
-                age = time.time() - os.path.getmtime(lock_path)
-            except OSError:
-                age = 0
-            if age > STALE_LOCK_SECS:
-                if verbose: _log("[shared] Stale lock detected. Removing...")
-                try: os.remove(lock_path)
-                except FileNotFoundError: pass
-                continue
-
-            # 進捗待ち
-            if verbose: _log("[shared] Waiting for cache to be ready...")
-            time.sleep(1.0)
-
-    try:
-        if verbose: _log(f"[shared] Run sweep and cache to: {os.path.basename(cache_path)}")
-
-        data = {name: {k: [] for k in range(len(budget_list))} for name in scheduler_names}
-        last_hb = time.time()
-
-        for k, C_total in enumerate(budget_list):
-            if verbose: _log(f"=== [SHARED {noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===")
-
-            for r in range(repeat):
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    _log(f"  [repeat {r+1}/{repeat}]")
-
-                # ハートビート(ロックの mtime を更新して“生存”を伝える)
-                now = time.time()
-                if now - last_hb >= HEARTBEAT_EVERY:
-                    try: os.utime(lock_path, None)
-                    except FileNotFoundError: pass
-                    last_hb = now
-
-                # 1リピート = 1トポロジ
-                fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-                def network_generator(path_num, pair_idx):
-                    return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-                for name in scheduler_names:
-                    per_pair_results, total_cost, per_pair_details = run_scheduler(
-                        node_path_list=node_path_list,
-                        importance_list=importance_list,
-                        scheduler_name=name,
-                        bounces=list(bounces),
-                        C_total=int(C_total),
-                        network_generator=network_generator,
-                        return_details=True,
-                    )
-                    data[name][k].append({
-                        "per_pair_results": per_pair_results,
-                        "per_pair_details": per_pair_details,
-                        "total_cost": total_cost,
-                    })
-
-        payload = {"config": config, "budget_list": list(budget_list), "data": data}
-
-        # アトミック書き込み
-        tmp = cache_path + ".tmp"
-        with open(tmp, "wb") as f:
-            pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
-        os.replace(tmp, cache_path)
-
-        return payload
-
-    finally:
-        if got_lock:
-            try: os.remove(lock_path)
-            except FileNotFoundError: pass
-# =========================
-# 1) Accuracy: 平均のみ(CIなし)
-# =========================
-def plot_accuracy_vs_budget(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_accuracy_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"accs": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_results = run["per_pair_results"]
-                acc = float(np.mean([1.0 if c else 0.0 for (c, _cost, _bf) in per_pair_results])) if per_pair_results else 0.0
-                results[name]["accs"][k].append(acc)
-
-    # plot
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    xs = list(budget_list)
-    for name, data in results.items():
-        avg_accs = [float(np.mean(v)) if v else 0.0 for v in data["accs"]]
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, avg_accs, linewidth=2.0, label=label)
-    ax.set_xlabel("Total Budget (C)")
-    ax.set_ylabel("Average Correctness")
-    ax.grid(True); ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")
-
-# =========================
-# 2) Value vs Used(x=実コスト平均)
-# =========================
-def plot_value_vs_used(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_value_vs_used_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"values": [[] for _ in budget_list], "costs": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_details = run["per_pair_details"]
-                total_cost = int(run["total_cost"])
-                # value = Σ_d I_d Σ_l est(d,l) * alloc(d,l)
-                value = 0.0
-                for d, det in enumerate(per_pair_details):
-                    alloc = det.get("alloc_by_path", {})
-                    est   = det.get("est_fid_by_path", {})
-                    inner = sum(float(est.get(l, 0.0)) * int(b) for l, b in alloc.items())
-                    I = float(importance_list[d]) if d < len(importance_list) else 1.0
-                    value += I * inner
-                results[name]["values"][k].append(float(value))
-                results[name]["costs"][k].append(total_cost)
-
-    # plot
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    for name, dat in results.items():
-        xs = [float(np.mean(v)) if v else 0.0 for v in dat["costs"]]
-        ys = [float(np.mean(v)) if v else 0.0 for v in dat["values"]]
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, ys, linewidth=2.0, marker="o", label=label)
-    ax.set_xlabel("Total Measured Cost (used)")
-    ax.set_ylabel("Total Value (Σ I_d Σ f̂_{d,l}·B_{d,l})")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")
-
-# =========================
-# 3) Value vs Budget target(x=目標予算)
-# =========================
-def plot_value_vs_budget_target(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_value_vs_budget_target_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"values": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_details = run["per_pair_details"]
-                value = 0.0
-                for d, det in enumerate(per_pair_details):
-                    alloc = det.get("alloc_by_path", {})
-                    est   = det.get("est_fid_by_path", {})
-                    inner = sum(float(est.get(l, 0.0)) * int(b) for l, b in alloc.items())
-                    I = float(importance_list[d]) if d < len(importance_list) else 1.0
-                    value += I * inner
-                results[name]["values"][k].append(float(value))
-
-    # plot
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    xs = list(budget_list)
-    for name, dat in results.items():
-        ys = [float(np.mean(v)) if v else 0.0 for v in dat["values"]]
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, ys, linewidth=2.0, marker="o", label=label)
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Total Value (Σ I_d Σ f̂_{d,l}·B_{d,l})")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")
-
-# =========================
-# 4) 幅(UB-LB)Unweighted: 全リンク総和
-# =========================
-def plot_widthsum_alllinks_vs_budget(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10, delta=0.1,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_widthsum_alllinks_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_details = run["per_pair_details"]
-                v = sum_widths_all_links(per_pair_details, delta=delta)
-                results[name]["sums"][k].append(v)
-
-    # plot (mean ± 95%CI)
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    xs = list(budget_list)
-    for name, dat in results.items():
-        means, halfs = [], []
-        for vals in dat["sums"]:
-            m, h = mean_ci95(vals); means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, means, linewidth=2.0, marker="o", label=label)
-        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Sum of (UB - LB) over all links")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")
-
-# =========================
-# 5) 幅(UB-LB)Unweighted: ペア最小幅の総和
-# =========================
-def plot_minwidthsum_perpair_vs_budget(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10, delta=0.1,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_minwidthsum_perpair_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_details = run["per_pair_details"]
-                v = sum_minwidths_perpair(per_pair_details, delta=delta)
-                results[name]["sums"][k].append(v)
-
-    # plot (mean ± 95%CI)
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    xs = list(budget_list)
-    for name, dat in results.items():
-        means, halfs = [], []
-        for vals in dat["sums"]:
-            m, h = mean_ci95(vals); means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, means, linewidth=2.0, marker="o", label=label)
-        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Sum over pairs of min (UB - LB)")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")
-
-# =========================
-# 6) 幅(UB-LB)Weighted: 全リンク I_d·幅 総和
-# =========================
-def plot_widthsum_alllinks_weighted_vs_budget(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10, delta=0.1,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_widthsum_alllinks_weighted_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_details = run["per_pair_details"]
-                v = sum_weighted_widths_all_links(per_pair_details, importance_list, delta=delta)
-                results[name]["sums"][k].append(v)
-
-    # plot (mean ± 95%CI)
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    xs = list(budget_list)
-    for name, dat in results.items():
-        means, halfs = [], []
-        for vals in dat["sums"]:
-            m, h = mean_ci95(vals); means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, means, linewidth=2.0, marker="o", label=label)
-        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Weighted Sum of Widths  Σ_d Σ_l I_d (UB - LB)")
-    ax.grid(True); ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")
-
-# =========================
-# 7) 幅(UB-LB)Weighted: ペアごとの I_d·最小幅 総和
-# =========================
-def plot_minwidthsum_perpair_weighted_vs_budget(
-    budget_list, scheduler_names, noise_model,
-    node_path_list, importance_list,
-    bounces=(1,2,3,4), repeat=10, delta=0.1,
-    verbose=True, print_every=1,
-):
-    file_name = f"plot_minwidthsum_perpair_weighted_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    outdir = os.path.join(root_dir, "outputs")
-    os.makedirs(outdir, exist_ok=True)
-
-    payload = _run_or_load_shared_sweep(
-        budget_list, scheduler_names, noise_model,
-        node_path_list, importance_list,
-        bounces=bounces, repeat=repeat,
-        verbose=verbose, print_every=print_every,
-    )
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-    for name in scheduler_names:
-        for k in range(len(budget_list)):
-            for run in payload["data"][name][k]:
-                per_pair_details = run["per_pair_details"]
-                v = sum_weighted_min_widths_perpair(per_pair_details, importance_list, delta=delta)
-                results[name]["sums"][k].append(v)
-
-    # plot (mean ± 95%CI)
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    xs = list(budget_list)
-    for name, dat in results.items():
-        means, halfs = [], []
-        for vals in dat["sums"]:
-            m, h = mean_ci95(vals); means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
-        ax.plot(xs, means, linewidth=2.0, marker="o", label=label)
-        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Weighted sum over pairs of min (UB - LB) (× I_d)")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf = f"{file_name}.pdf"
-    plt.savefig(pdf); 
-    if shutil.which("pdfcrop"): os.system(f"pdfcrop {pdf} {pdf}")
-    _log(f"Saved: {pdf}")

+ 0 - 852
add_linkselfie/evaluationold.py

@@ -1,852 +0,0 @@
-# evaluation.py
-# Run evaluation and plot figures
-import math
-import os
-import pickle
-import time
-import shutil
-
-import matplotlib.pyplot as plt
-import numpy as np
-from cycler import cycler
-
-from algorithms import benchmark_using_algorithm  # may be used elsewhere
-from network import QuantumNetwork
-from schedulers import run_scheduler  # パッケージ化したものを使う
-
-# ---- Matplotlib style (IEEE-ish) ----
-plt.rc("font", family="Times New Roman")
-plt.rc("font", size=20)
-default_cycler = (
-    cycler(color=["#4daf4a", "#377eb8", "#e41a1c", "#984ea3", "#ff7f00", "#a65628"])
-    + cycler(marker=["s", "v", "o", "x", "*", "+"])
-    + cycler(linestyle=[":", "--", "-", "-.", "--", ":"])
-)
-plt.rc("axes", prop_cycle=default_cycler)
-
-
-# =========================
-# Fidelity generators
-# =========================
-def generate_fidelity_list_avg_gap(path_num):
-    result = []
-    fidelity_max = 1
-    fidelity_min = 0.9
-    gap = (fidelity_max - fidelity_min) / path_num
-    fidelity = fidelity_max
-    for _ in range(path_num):
-        result.append(fidelity)
-        fidelity -= gap
-    assert len(result) == path_num
-    return result
-
-
-def generate_fidelity_list_fix_gap(path_num, gap, fidelity_max=1):
-    result = []
-    fidelity = fidelity_max
-    for _ in range(path_num):
-        result.append(fidelity)
-        fidelity -= gap
-    assert len(result) == path_num
-    return result
-
-
-def generate_fidelity_list_random(path_num, alpha=0.95, beta=0.85, variance=0.1):
-    """Generate `path_num` links.
-    u_1 = alpha, u_i = beta for all i = 2, 3, ..., n.
-    Fidelity_i ~ N(u_i, variance), clipped to [0.8, 1].
-    Ensure the top-1 gap is large enough (> 0.02) for termination guarantees.
-    """
-    while True:
-        mean = [alpha] + [beta] * (path_num - 1)
-        result = []
-        for i in range(path_num):
-            mu = mean[i]
-            # Sample a Gaussian random variable and make sure its value is in the valid range
-            while True:
-                r = np.random.normal(mu, variance)
-                # Depolarizing noise and amplitude damping noise models require fidelity >= 0.5
-                # Be conservative: require >= 0.8
-                if 0.8 <= r <= 1.0:
-                    break
-            result.append(r)
-        assert len(result) == path_num
-        sorted_res = sorted(result, reverse=True)
-        # To guarantee the termination of algorithms, we require that the gap is large enough
-        if sorted_res[0] - sorted_res[1] > 0.02:
-            return result
-
-
-# =========================
-# Progress helpers (LinkSelfie風)
-# =========================
-def _start_timer():
-    return {"t0": time.time(), "last": time.time()}
-
-
-def _tick(timer):
-    now = time.time()
-    dt_total = now - timer["t0"]
-    dt_step = now - timer["last"]
-    timer["last"] = now
-    return dt_total, dt_step
-
-
-def _log(msg):
-    print(msg, flush=True)
-
-
-# =========================
-# Plots
-# =========================
-def plot_accuracy_vs_budget(
-    budget_list,          # e.g., [1000, 2000, 3000, ...] (x-axis)
-    scheduler_names,      # e.g., ["LNaive", "Greedy", ...]
-    noise_model,          # e.g., "Depolar"
-    node_path_list,       # e.g., [5, 5, 5]
-    importance_list,      # e.g., [0.4, 0.7, 1.0] (not used here, but kept for interface)
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    verbose=True,
-    print_every=1,
-):
-    file_name = f"plot_accuracy_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-    file_path = os.path.join(output_dir, f"{file_name}.pickle")
-
-    if os.path.exists(file_path):
-        _log("Pickle data exists, skip simulation and plot the data directly.")
-        _log("To rerun, delete the pickle in `outputs`.")
-        with open(file_path, "rb") as f:
-            payload = pickle.load(f)
-            budget_list = payload["budget_list"]
-            results = payload["results"]
-    else:
-        results = {name: {"accs": [[] for _ in budget_list]} for name in scheduler_names}
-        for k, C_total in enumerate(budget_list):
-            timer = _start_timer()
-            if verbose:
-                _log(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===")
-            for r in range(repeat):
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    _log(f"  [repeat {r+1}/{repeat}] generating topology …")
-                # 1リピート = 1トポロジ(全スケジューラで共有)
-                fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-                def network_generator(path_num, pair_idx):
-                    return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-                for name in scheduler_names:
-                    if verbose and ((r + 1) % print_every == 0 or r == 0):
-                        _log(f"    - {name}: running …")
-                    per_pair_results, _ = run_scheduler(
-                        node_path_list=node_path_list,
-                        importance_list=importance_list,
-                        scheduler_name=name,
-                        bounces=list(bounces),
-                        C_total=int(C_total),
-                        network_generator=network_generator,
-                    )
-                    acc = (
-                        float(np.mean([1.0 if c else 0.0 for (c, _cost, _bf) in per_pair_results]))
-                        if per_pair_results
-                        else 0.0
-                    )
-                    results[name]["accs"][k].append(acc)
-                    if verbose and ((r + 1) % print_every == 0 or r == 0):
-                        _log(f"      -> acc={acc:.3f}")
-            if verbose:
-                tot, _ = _tick(timer)
-                _log(f"=== done Budget={C_total} | elapsed {tot:.1f}s ===")
-
-        with open(file_path, "wb") as f:
-            pickle.dump({"budget_list": list(budget_list), "results": results}, f)
-
-    # --- Plot ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    x = list(budget_list)
-    for name, data in results.items():
-        avg_accs = [float(np.mean(v)) if v else 0.0 for v in data["accs"]]
-        label = name.replace("Vanilla NB", "VanillaNB").replace("Succ. Elim. NB", "SuccElimNB")
-        ax.plot(x, avg_accs, linewidth=2.0, label=label)
-
-    ax.set_xlabel("Total Budget (C)")
-    ax.set_ylabel("Average Correctness")
-    ax.grid(True)
-    ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    _log(f"Saved: {pdf_name}")
-
-
-def plot_value_vs_used(
-    budget_list,
-    scheduler_names,
-    noise_model,
-    node_path_list,
-    importance_list,
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    verbose=True,
-    print_every=1,
-):
-    """x = 実コスト平均(used)で描く版。旧 plot_value_vs_budget と同等の挙動。"""
-    file_name = f"plot_value_vs_used_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-
-    results = {
-        name: {"values": [[] for _ in budget_list], "costs": [[] for _ in budget_list]}
-        for name in scheduler_names
-    }
-
-    for k, C_total in enumerate(budget_list):
-        timer = _start_timer()
-        if verbose:
-            _log(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===")
-
-        # 1リピート = 1トポロジ(全スケジューラで共有)
-        fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-        def network_generator(path_num, pair_idx):
-            return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-        for r in range(repeat):
-            if verbose and ((r + 1) % print_every == 0 or r == 0):
-                _log(f"  [repeat {r+1}/{repeat}]")
-            for name in scheduler_names:
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    _log(f"    - {name}: running …")
-                per_pair_results, total_cost, per_pair_details = run_scheduler(
-                    node_path_list=node_path_list,
-                    importance_list=importance_list,
-                    scheduler_name=name,
-                    bounces=list(bounces),
-                    C_total=int(C_total),
-                    network_generator=network_generator,
-                    return_details=True,
-                )
-                # 価値の合成
-                value = 0.0
-                for d, details in enumerate(per_pair_details):
-                    alloc = details.get("alloc_by_path", {})
-                    est = details.get("est_fid_by_path", {})
-                    inner = sum(float(est.get(l, 0.0)) * int(b) for l, b in alloc.items())
-                    value += float(importance_list[d]) * inner
-
-                results[name]["values"][k].append(float(value))
-                results[name]["costs"][k].append(int(total_cost))
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    _log(f"      -> used={total_cost}, value={value:.2f}")
-
-        if verbose:
-            tot, _ = _tick(timer)
-            _log(f"=== done Budget={C_total} | elapsed {tot:.1f}s ===")
-
-    # --- Plot (x = 実コスト平均) ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    for name, data in results.items():
-        xs = [float(np.mean(v)) if v else 0.0 for v in data["costs"]]
-        ys = [float(np.mean(v)) if v else 0.0 for v in data["values"]]
-        ax.plot(xs, ys, linewidth=2.0, marker="o", label=name)
-
-    ax.set_xlabel("Total Measured Cost (used)")
-    ax.set_ylabel("Total Value (Σ I_d Σ f̂_{d,l}·B_{d,l})")
-    ax.grid(True)
-    ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    _log(f"Saved: {pdf_name}")
-
-
-def plot_value_vs_budget_target(
-    budget_list,
-    scheduler_names,
-    noise_model,
-    node_path_list,
-    importance_list,
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    verbose=True,
-    print_every=1,
-):
-    """x = 目標予算(指定した budget_list をそのまま x 軸に)で描く版。"""
-    file_name = f"plot_value_vs_budget_target_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-
-    results = {
-        name: {"values": [[] for _ in budget_list], "costs": [[] for _ in budget_list]}
-        for name in scheduler_names
-    }
-
-    for k, C_total in enumerate(budget_list):
-        timer = _start_timer()
-        if verbose:
-            _log(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===")
-
-        fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-        def network_generator(path_num, pair_idx):
-            return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-        for r in range(repeat):
-            if verbose and ((r + 1) % print_every == 0 or r == 0):
-                _log(f"  [repeat {r+1}/{repeat}]")
-            for name in scheduler_names:
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    _log(f"    - {name}: running …")
-                per_pair_results, total_cost, per_pair_details = run_scheduler(
-                    node_path_list=node_path_list,
-                    importance_list=importance_list,
-                    scheduler_name=name,
-                    bounces=list(bounces),
-                    C_total=int(C_total),
-                    network_generator=network_generator,
-                    return_details=True,
-                )
-                value = 0.0
-                for d, details in enumerate(per_pair_details):
-                    alloc = details.get("alloc_by_path", {})
-                    est = details.get("est_fid_by_path", {})
-                    inner = sum(float(est.get(l, 0.0)) * int(b) for l, b in alloc.items())
-                    value += float(importance_list[d]) * inner
-
-                results[name]["values"][k].append(float(value))
-                results[name]["costs"][k].append(int(total_cost))
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    _log(f"      -> used={total_cost}, value={value:.2f}")
-
-        if verbose:
-            tot, _ = _tick(timer)
-            _log(f"=== done Budget={C_total} | elapsed {tot:.1f}s ===")
-
-    # --- Plot (x = 目標予算) ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    x = list(budget_list)
-    for name, data in results.items():
-        ys = [float(np.mean(v)) if v else 0.0 for v in data["values"]]
-        ax.plot(x, ys, linewidth=2.0, marker="o", label=name)
-
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Total Value (Σ I_d Σ f̂_{d,l}·B_{d,l})")
-    ax.grid(True)
-    ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    _log(f"Saved: {pdf_name}")
-
-
-# =========================
-# CI width helpers and plots
-# =========================
-def _ci_radius_hoeffding(n: int, delta: float = 0.1) -> float:
-    if n <= 0:
-        return 1.0
-    return math.sqrt(0.5 * math.log(2.0 / delta) / n)
-
-
-# =========================
-# Width-sum metrics (new)
-# =========================
-
-def _sum_widths_all_links(per_pair_details, delta: float = 0.1) -> float:
-    """
-    すべてのペア・すべてのリンクについて、(UB - LB) を合計。
-    est が無いリンクはスキップ(=寄与0)。測定していないリンクは数えません。
-    """
-    total = 0.0
-    for det in per_pair_details:
-        alloc = det.get("alloc_by_path", {})  # n = 測定回数
-        est   = det.get("est_fid_by_path", {})  # 標本平均
-        for pid, m in est.items():
-            n = int(alloc.get(pid, 0))
-            rad = _ci_radius_hoeffding(n, delta)
-            lb = max(0.0, float(m) - rad)
-            ub = min(1.0, float(m) + rad)
-            total += (ub - lb)
-    return float(total)
-
-
-def _sum_min_widths_per_pair(per_pair_details, delta: float = 0.1) -> float:
-    """
-    ペアごとにリンクの (UB - LB) を算出し、その「最小値」を取り、全ペアで合計。
-    est が空のペアは保守的に 1.0 を加算(“全く分からない”幅として扱う)。
-    """
-    s = 0.0
-    for det in per_pair_details:
-        alloc = det.get("alloc_by_path", {})
-        est   = det.get("est_fid_by_path", {})
-        if not est:
-            s += 1.0
-            continue
-        widths = []
-        for pid, m in est.items():
-            n = int(alloc.get(pid, 0))
-            rad = _ci_radius_hoeffding(n, delta)
-            lb = max(0.0, float(m) - rad)
-            ub = min(1.0, float(m) + rad)
-            widths.append(ub - lb)
-        s += (min(widths) if widths else 1.0)
-    return float(s)
-
-
-def plot_widthsum_alllinks_vs_budget(
-    budget_list,
-    scheduler_names,
-    noise_model,
-    node_path_list,
-    importance_list,
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    delta=0.1,
-    verbose=True,
-    print_every=1,
-):
-    """
-    y = 全リンク(UB-LB)総和 の平均 ±95%CI を、x = 目標予算 で描画。
-    生データは outputs/plot_widthsum_alllinks_vs_budget_*.pickle に保存。
-    """
-    file_name = f"plot_widthsum_alllinks_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-
-    for k, C_total in enumerate(budget_list):
-        if verbose:
-            print(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===", flush=True)
-
-        # 1リピート=1トポロジ(全スケジューラ共有)
-        fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-        def network_generator(path_num, pair_idx):
-            return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-        for r in range(repeat):
-            if verbose and ((r + 1) % print_every == 0 or r == 0):
-                print(f"  [repeat {r+1}/{repeat}]", flush=True)
-
-            for name in scheduler_names:
-                per_pair_results, total_cost, per_pair_details = run_scheduler(
-                    node_path_list=node_path_list,
-                    importance_list=importance_list,
-                    scheduler_name=name,
-                    bounces=list(bounces),
-                    C_total=int(C_total),
-                    network_generator=network_generator,
-                    return_details=True,
-                )
-                v = _sum_widths_all_links(per_pair_details, delta=delta)
-                results[name]["sums"][k].append(v)
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    print(f"    - {name}: sum_alllinks={v:.4f} (used={total_cost})", flush=True)
-
-    # --- Save raw data (.pickle) ---
-    file_path = os.path.join(output_dir, f"{file_name}.pickle")
-    with open(file_path, "wb") as f:
-        pickle.dump({"budget_list": list(budget_list), "results": results}, f)
-    print(f"Saved pickle: {file_path}")
-
-    # --- Plot mean ± 95% CI across repeats ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    x = list(budget_list)
-    for name, data in results.items():
-        means, halfs = [], []
-        for vals in data["sums"]:
-            m, h = mean_ci95(vals)
-            means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        ax.plot(x, means, linewidth=2.0, marker="o", label=name)
-        ax.fill_between(x, means - halfs, means + halfs, alpha=0.25)
-
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Sum of (UB - LB) over all links")
-    ax.grid(True)
-    ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    print(f"Saved: {pdf_name}")
-
-
-def plot_minwidthsum_perpair_vs_budget(
-    budget_list,
-    scheduler_names,
-    noise_model,
-    node_path_list,
-    importance_list,
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    delta=0.1,
-    verbose=True,
-    print_every=1,
-):
-    """
-    y = ペアごとの (UB-LB) 最小値の合計 の平均 ±95%CI、x = 目標予算。
-    生データは outputs/plot_minwidthsum_perpair_vs_budget_*.pickle に保存。
-    """
-    file_name = f"plot_minwidthsum_perpair_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-
-    for k, C_total in enumerate(budget_list):
-        if verbose:
-            print(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===", flush=True)
-
-        fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-        def network_generator(path_num, pair_idx):
-            return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-        for r in range(repeat):
-            if verbose and ((r + 1) % print_every == 0 or r == 0):
-                print(f"  [repeat {r+1}/{repeat}]", flush=True)
-
-            for name in scheduler_names:
-                per_pair_results, total_cost, per_pair_details = run_scheduler(
-                    node_path_list=node_path_list,
-                    importance_list=importance_list,
-                    scheduler_name=name,
-                    bounces=list(bounces),
-                    C_total=int(C_total),
-                    network_generator=network_generator,
-                    return_details=True,
-                )
-                v = _sum_min_widths_per_pair(per_pair_details, delta=delta)
-                results[name]["sums"][k].append(v)
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    print(f"    - {name}: sum_min_perpair={v:.4f} (used={total_cost})", flush=True)
-
-    # --- Save raw data (.pickle) ---
-    file_path = os.path.join(output_dir, f"{file_name}.pickle")
-    with open(file_path, "wb") as f:
-        pickle.dump({"budget_list": list(budget_list), "results": results}, f)
-    print(f"Saved pickle: {file_path}")
-
-    # --- Plot mean ± 95% CI across repeats ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    x = list(budget_list)
-    for name, data in results.items():
-        means, halfs = [], []
-        for vals in data["sums"]:
-            m, h = mean_ci95(vals)
-            means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        ax.plot(x, means, linewidth=2.0, marker="o", label=name)
-        ax.fill_between(x, means - halfs, means + halfs, alpha=0.25)
-
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Sum over pairs of min (UB - LB)")
-    ax.grid(True)
-    ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    print(f"Saved: {pdf_name}")
-
-
-
-# =========================
-# Weighted width-sum metrics (add-on)
-# =========================
-
-def _sum_weighted_widths_all_links(per_pair_details, importance_list, delta: float = 0.1) -> float:
-    """
-    すべてのペア・すべてのリンクの (UB-LB) に、ペア重要度 I_d を掛けて合計。
-    importance_list[d] が無ければ I_d=1.0 として扱う。
-    """
-    total = 0.0
-    for d, det in enumerate(per_pair_details):
-        I = float(importance_list[d]) if d < len(importance_list) else 1.0
-        alloc = det.get("alloc_by_path", {})
-        est   = det.get("est_fid_by_path", {})
-        for pid, m in est.items():
-            n = int(alloc.get(pid, 0))
-            rad = _ci_radius_hoeffding(n, delta)
-            lb = max(0.0, float(m) - rad)
-            ub = min(1.0, float(m) + rad)
-            total += I * (ub - lb)
-    return float(total)
-
-
-def _sum_weighted_min_widths_per_pair(per_pair_details, importance_list, delta: float = 0.1) -> float:
-    """
-    ペア d ごとに min_l (UB-LB) を計算し、I_d を掛けて全ペアで合計。
-    est が空のペアは保守的に幅=1.0 として I_d*1.0 を加算。
-    """
-    s = 0.0
-    for d, det in enumerate(per_pair_details):
-        I = float(importance_list[d]) if d < len(importance_list) else 1.0
-        alloc = det.get("alloc_by_path", {})
-        est   = det.get("est_fid_by_path", {})
-        if not est:
-            s += I * 1.0
-            continue
-        widths = []
-        for pid, m in est.items():
-            n = int(alloc.get(pid, 0))
-            rad = _ci_radius_hoeffding(n, delta)
-            lb = max(0.0, float(m) - rad)
-            ub = min(1.0, float(m) + rad)
-            widths.append(ub - lb)
-        s += I * (min(widths) if widths else 1.0)
-    return float(s)
-
-
-def plot_widthsum_alllinks_weighted_vs_budget(
-    budget_list,
-    scheduler_names,
-    noise_model,
-    node_path_list,
-    importance_list,
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    delta=0.1,
-    verbose=True,
-    print_every=1,
-):
-    """
-    y = Σ_d Σ_l I_d·(UB-LB) の平均 ±95%CI、x = 目標予算。
-    生データは outputs/plot_widthsum_alllinks_weighted_vs_budget_*.pickle に保存。
-    """
-    file_name = f"plot_widthsum_alllinks_weighted_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-
-    for k, C_total in enumerate(budget_list):
-        if verbose:
-            print(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===", flush=True)
-
-        fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-        def network_generator(path_num, pair_idx):
-            return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-        for r in range(repeat):
-            if verbose and ((r + 1) % print_every == 0 or r == 0):
-                print(f"  [repeat {r+1}/{repeat}]", flush=True)
-
-            for name in scheduler_names:
-                per_pair_results, total_cost, per_pair_details = run_scheduler(
-                    node_path_list=node_path_list,
-                    importance_list=importance_list,
-                    scheduler_name=name,
-                    bounces=list(bounces),
-                    C_total=int(C_total),
-                    network_generator=network_generator,
-                    return_details=True,
-                )
-                v = _sum_weighted_widths_all_links(per_pair_details, importance_list, delta=delta)
-                results[name]["sums"][k].append(v)
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    print(f"    - {name}: wsum_alllinks={v:.4f} (used={total_cost})", flush=True)
-
-    # --- Save raw data (.pickle) ---
-    file_path = os.path.join(output_dir, f"{file_name}.pickle")
-    with open(file_path, "wb") as f:
-        pickle.dump({"budget_list": list(budget_list), "results": results}, f)
-    print(f"Saved pickle: {file_path}")
-
-    # --- Plot mean ± 95% CI ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    x = list(budget_list)
-    for name, data in results.items():
-        means, halfs = [], []
-        for vals in data["sums"]:
-            m, h = mean_ci95(vals)
-            means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        ax.plot(x, means, linewidth=2.0, marker="o", label=name)
-        ax.fill_between(x, means - halfs, means + halfs, alpha=0.25)
-
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Weighted sum of (UB - LB) over all links (× I_d)")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    print(f"Saved: {pdf_name}")
-
-
-def plot_minwidthsum_perpair_weighted_vs_budget(
-    budget_list,
-    scheduler_names,
-    noise_model,
-    node_path_list,
-    importance_list,
-    bounces=(1, 2, 3, 4),
-    repeat=10,
-    delta=0.1,
-    verbose=True,
-    print_every=1,
-):
-    """
-    y = Σ_d I_d·min_l(UB-LB) の平均 ±95%CI、x = 目標予算。
-    生データは outputs/plot_minwidthsum_perpair_weighted_vs_budget_*.pickle に保存。
-    """
-    file_name = f"plot_minwidthsum_perpair_weighted_vs_budget_{noise_model}"
-    root_dir = os.path.dirname(os.path.abspath(__file__))
-    output_dir = os.path.join(root_dir, "outputs")
-    os.makedirs(output_dir, exist_ok=True)
-
-    results = {name: {"sums": [[] for _ in budget_list]} for name in scheduler_names}
-
-    for k, C_total in enumerate(budget_list):
-        if verbose:
-            print(f"\n=== [{noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===", flush=True)
-
-        fidelity_bank = [generate_fidelity_list_random(n) for n in node_path_list]
-
-        def network_generator(path_num, pair_idx):
-            return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)
-
-        for r in range(repeat):
-            if verbose and ((r + 1) % print_every == 0 or r == 0):
-                print(f"  [repeat {r+1}/{repeat}]", flush=True)
-
-            for name in scheduler_names:
-                per_pair_results, total_cost, per_pair_details = run_scheduler(
-                    node_path_list=node_path_list,
-                    importance_list=importance_list,
-                    scheduler_name=name,
-                    bounces=list(bounces),
-                    C_total=int(C_total),
-                    network_generator=network_generator,
-                    return_details=True,
-                )
-                v = _sum_weighted_min_widths_per_pair(per_pair_details, importance_list, delta=delta)
-                results[name]["sums"][k].append(v)
-                if verbose and ((r + 1) % print_every == 0 or r == 0):
-                    print(f"    - {name}: wsum_min_perpair={v:.4f} (used={total_cost})", flush=True)
-
-    # --- Save raw data (.pickle) ---
-    file_path = os.path.join(output_dir, f"{file_name}.pickle")
-    with open(file_path, "wb") as f:
-        pickle.dump({"budget_list": list(budget_list), "results": results}, f)
-    print(f"Saved pickle: {file_path}")
-
-    # --- Plot mean ± 95% CI ---
-    plt.rc("axes", prop_cycle=default_cycler)
-    fig, ax = plt.subplots()
-    x = list(budget_list)
-    for name, data in results.items():
-        means, halfs = [], []
-        for vals in data["sums"]:
-            m, h = mean_ci95(vals)
-            means.append(m); halfs.append(h)
-        means = np.asarray(means); halfs = np.asarray(halfs)
-        ax.plot(x, means, linewidth=2.0, marker="o", label=name)
-        ax.fill_between(x, means - halfs, means + halfs, alpha=0.25)
-
-    ax.set_xlabel("Budget (target)")
-    ax.set_ylabel("Weighted sum over pairs of min (UB - LB) (× I_d)")
-    ax.grid(True); ax.legend(title="Scheduler")
-    plt.tight_layout()
-    pdf_name = f"{file_name}.pdf"
-    plt.savefig(pdf_name)
-    if shutil.which("pdfcrop"):
-        os.system(f"pdfcrop {pdf_name} {pdf_name}")
-    print(f"Saved: {pdf_name}")
-
-
-# =========================
-# 95%CI helpers (repeats 可変対応)
-# =========================
-# 小 n 用の簡易表(両側95%、df = n-1)
-_T95 = {
-    1: 12.706,
-    2: 4.303,
-    3: 3.182,
-    4: 2.776,
-    5: 2.571,
-    6: 2.447,
-    7: 2.365,
-    8: 2.306,
-    9: 2.262,
-    10: 2.228,
-    11: 2.201,
-    12: 2.179,
-    13: 2.160,
-    14: 2.145,
-    15: 2.131,
-    16: 2.120,
-    17: 2.110,
-    18: 2.101,
-    19: 2.093,
-    20: 2.086,
-    21: 2.080,
-    22: 2.074,
-    23: 2.069,
-    24: 2.064,
-    25: 2.060,
-    26: 2.056,
-    27: 2.052,
-    28: 2.048,
-    29: 2.045,
-}
-
-
-def tcrit_95(n: int) -> float:
-    """repeats=n に対する両側95% t臨界値 (df=n-1)。n<2 は 0 を返す。"""
-    if n <= 1:
-        return 0.0
-    df = n - 1
-    if df in _T95:
-        return _T95[df]
-    if df >= 30:
-        return 1.96  # 正規近似
-    return 2.13  # 小 n 保守値
-
-
-def mean_ci95(vals):
-    """同一 budget 上の値列 vals(可変 n)に対して (mean, halfwidth) を返す。"""
-    arr = np.asarray(vals, dtype=float)
-    n = len(arr)
-    if n == 0:
-        return 0.0, 0.0
-    if n == 1:
-        return float(arr[0]), 0.0
-    mean = float(arr.mean())
-    s = float(arr.std(ddof=1))
-    half = tcrit_95(n) * (s / math.sqrt(n))
-    return mean, half
-
-
-def _plot_with_ci_band(ax, xs, mean, half, *, label, line_kwargs=None, band_kwargs=None):
-    line_kwargs = {} if line_kwargs is None else dict(line_kwargs)
-    band_kwargs = {"alpha": 0.25} | ({} if band_kwargs is None else dict(band_kwargs))
-    ax.plot(xs, mean, label=label, **line_kwargs)
-    ax.fill_between(xs, mean - half, mean + half, **band_kwargs)

+ 0 - 24
add_linkselfie/fidelity.py

@@ -1,24 +0,0 @@
-
-# fidelity.py
-# Keep fidelity/importance generators small and explicit.
-import random
-
-def generate_fidelity_list_random(path_num: int, alpha: float = 0.95, beta: float = 0.85, variance: float = 0.04):
-    """
-    Generate `path_num` fidelities.
-    One "good" link around alpha, the rest around beta, clipped to [0.5, 1.0].
-    """
-    vals = []
-    for i in range(path_num):
-        mu = alpha if i == 0 else beta
-        # simple Gaussian around mu, but clipped
-        v = random.gauss(mu, variance**0.5)
-        v = max(0.5, min(1.0, v))
-        vals.append(v)
-    # shuffle so the "good" link is not always index 0
-    random.shuffle(vals)
-    return vals
-
-def generate_importance_list_random(n: int, low: float = 0.5, high: float = 2.0):
-    """Return a list of n importances I_n ~ Uniform[low, high]."""
-    return [random.uniform(low, high) for _ in range(n)]

+ 0 - 105
add_linkselfie/main.py

@@ -1,105 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-main.py — evaluation.py の各種プロットを一括実行
-"""
-
-from multiprocessing.pool import Pool
-import os
-import random
-
-try:
-    from utils import set_random_seed
-except Exception:
-    def set_random_seed(seed: int = 12):
-        random.seed(seed)
-        try:
-            import numpy as np
-            np.random.seed(seed)
-        except Exception:
-            pass
-
-from evaluation import (
-    plot_accuracy_vs_budget,
-    plot_value_vs_used,
-    plot_value_vs_budget_target,
-    plot_widthsum_alllinks_vs_budget,
-    plot_minwidthsum_perpair_vs_budget,
-    plot_widthsum_alllinks_weighted_vs_budget,
-    plot_minwidthsum_perpair_weighted_vs_budget,
-)
-
-def main():
-    set_random_seed(12)
-    num_workers      = max(1, (os.cpu_count() or 4) // 2)
-    noise_model_list = ["Depolar"]
-    scheduler_names  = ["LNaive", "Greedy"]
-    node_path_list   = [5, 5, 5]
-    importance_list  = [0.3, 0.6, 0.9]
-    budget_list      = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000]
-    bounces          = (1, 2, 3, 4)
-    repeat           = 10
-    delta            = 0.1
-
-    print("=== Config ===")
-    print(f"workers={num_workers}, noise_models={noise_model_list}")
-    print(f"schedulers={scheduler_names}")
-    print(f"node_path_list={node_path_list}, importance_list={importance_list}")
-    print(f"budgets={budget_list}, bounces={bounces}, repeat={repeat}, delta={delta}")
-    print("================\n")
-
-    p = Pool(processes=num_workers)
-    jobs = []
-
-    for noise_model in noise_model_list:
-        jobs.append(p.apply_async(
-            plot_accuracy_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"verbose": True}
-        ))
-        jobs.append(p.apply_async(
-            plot_value_vs_used,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"verbose": True}
-        ))
-        jobs.append(p.apply_async(
-            plot_value_vs_budget_target,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"verbose": True}
-        ))
-        jobs.append(p.apply_async(
-            plot_widthsum_alllinks_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-        jobs.append(p.apply_async(
-            plot_minwidthsum_perpair_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-        jobs.append(p.apply_async(
-            plot_widthsum_alllinks_weighted_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-        jobs.append(p.apply_async(
-            plot_minwidthsum_perpair_weighted_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-
-    p.close(); p.join()
-    for j in jobs: j.get()
-
-    print("\nAll jobs finished.")
-    print("Pickles -> ./outputs/,  PDF -> カレントディレクトリ に保存されます。")
-
-if __name__ == "__main__":
-    main()

+ 0 - 145
add_linkselfie/mainold.py

@@ -1,145 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-main.py — evaluation.py の各種プロットを一括実行
-  Unweighted:
-    1) plot_accuracy_vs_budget
-    2) plot_value_vs_used
-    3) plot_value_vs_budget_target
-    4) plot_widthsum_alllinks_vs_budget
-    5) plot_minwidthsum_perpair_vs_budget
-  Weighted(重要度 I_d を幅に掛ける版):
-    6) plot_widthsum_alllinks_weighted_vs_budget
-    7) plot_minwidthsum_perpair_weighted_vs_budget
-
-出力:
-  - 生データ pickle -> ./outputs/
-  - 図 PDF -> カレントディレクトリ
-"""
-
-from multiprocessing.pool import Pool
-import os
-import random
-
-# 任意:あなたの環境のユーティリティ。無ければフォールバック。
-try:
-    from utils import set_random_seed
-except Exception:
-    def set_random_seed(seed: int = 12):
-        random.seed(seed)
-        try:
-            import numpy as np
-            np.random.seed(seed)
-        except Exception:
-            pass
-
-# evaluation 側のプロット関数
-from evaluation import (
-    # Accuracy / Value 系
-    plot_accuracy_vs_budget,
-    plot_value_vs_used,
-    plot_value_vs_budget_target,
-
-    # 幅(UB-LB)系 - Unweighted
-    plot_widthsum_alllinks_vs_budget,
-    plot_minwidthsum_perpair_vs_budget,
-
-    # 幅(UB-LB)系 - Weighted (× I_d)
-    plot_widthsum_alllinks_weighted_vs_budget,
-    plot_minwidthsum_perpair_weighted_vs_budget,
-)
-
-
-def main():
-    # ===== 実験パラメータ =====
-    set_random_seed(12)
-    num_workers      = max(1, (os.cpu_count() or 4) // 2)
-    noise_model_list = ["Depolar"]              # 例: ["Depolar", "Dephase"]
-    scheduler_names  = ["LNaive", "Greedy"]     # 実装済みスケジューラ名に合わせて
-    node_path_list   = [5, 5, 5]                # ペアごとのリンク本数
-    importance_list  = [0.3, 0.6, 0.9]          # value系&weighted幅系で使用
-    budget_list      = [3000, 6000, 9000, 12000, 15000, 18000]
-    bounces          = (1, 2, 3, 4)             # 測定深さ候補(あなたの定義に従う)
-    repeat           = 10                       # 反復回数(精度と時間のトレードオフ)
-    delta            = 0.1                      # 幅用の信頼度パラメータ(Hoeffding)
-
-    print("=== Config ===")
-    print(f"workers={num_workers}, noise_models={noise_model_list}")
-    print(f"schedulers={scheduler_names}")
-    print(f"node_path_list={node_path_list}, importance_list={importance_list}")
-    print(f"budgets={budget_list}, bounces={bounces}, repeat={repeat}, delta={delta}")
-    print("================\n")
-
-    # ===== 実行キュー =====
-    p = Pool(processes=num_workers)
-    jobs = []
-
-    for noise_model in noise_model_list:
-        # --- Accuracy ---
-        jobs.append(p.apply_async(
-            plot_accuracy_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"verbose": True}
-        ))
-
-        # --- Value: x=used(実コスト平均) ---
-        jobs.append(p.apply_async(
-            plot_value_vs_used,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"verbose": True}
-        ))
-
-        # --- Value: x=target(指定予算) ---
-        jobs.append(p.apply_async(
-            plot_value_vs_budget_target,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"verbose": True}
-        ))
-
-        # --- Width (UB-LB) Unweighted: 全リンク総和 ---
-        jobs.append(p.apply_async(
-            plot_widthsum_alllinks_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-
-        # --- Width (UB-LB) Unweighted: ペア最小幅の総和 ---
-        jobs.append(p.apply_async(
-            plot_minwidthsum_perpair_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-
-        # --- Width (UB-LB) Weighted: 全リンク I_d·幅 総和 ---
-        jobs.append(p.apply_async(
-            plot_widthsum_alllinks_weighted_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-
-        # --- Width (UB-LB) Weighted: ペアごとの I_d·最小幅 総和 ---
-        jobs.append(p.apply_async(
-            plot_minwidthsum_perpair_weighted_vs_budget,
-            args=(budget_list, scheduler_names, noise_model,
-                  node_path_list, importance_list, bounces, repeat),
-            kwds={"delta": delta, "verbose": True}
-        ))
-
-    # ===== 実行 & 同期 =====
-    p.close()
-    p.join()
-    for j in jobs:
-        j.get()
-
-    print("\nAll jobs finished.")
-    print("Pickles -> ./outputs/,  PDF -> カレントディレクトリ に保存されます。")
-
-
-if __name__ == "__main__":
-    main()

+ 0 - 154
add_linkselfie/memo.org

@@ -1,154 +0,0 @@
-** ミーティング
-*** 対話の引用部分
-ノードS〜ノードD 間のリンク集合 L だけでなく、
-ノードS〜ノードD_n間のリンク集合 L_n (1 <= n <= N, N はノードSの隣接ノード数)がそれぞれ入力される。
-ノードS〜ノードD_n間の重要度 I_n (0〜1 の値) が入力として与えられる。
-総バウンスコスト C が入力として与えられる。
-この時、ノードS〜ノードD_n のノードペアの中で、
-K 個のノードペア (S, D_s_1), (S, D_s_2), ... (S, D_s_K) における
-忠実度が最大のリンクをそれぞれ発見する。
-ただし、発見するリンクのノードペア数 K は、
-重要度と忠実度の積の総和、つまり
-I = \sum_{k = 1}^K I_s_k * F_s_k
-が最大となるように定める。
-ここで F_n は (S, D_n) 間において忠実度の最大のリンクの忠実度である。
-
-A. この拡張問題は、複数のノードペア (S, Dₙ) に対して、限られたリソース(バウンスコスト)の中で、重要なノードペアの
-高忠実度リンクを選定する最適化問題です。以下に、この問題の形式的な定義
-を記述します。
-
-*** 考えたこと
-- 先生のストーリー案に関するchatGPTとの対話をもう1度読みなおした
-- 現状の評価指標では、 **発見するリンクのノードペア数 K** は、重要度と忠
-  実度の積の総和、つまりI =\sum_{k = 1}^K I_s_k * F_s_kが最大となるよ
-  うに定める。という部分の **発見するリンク** の定義をしていない。
-- societyではリンク価値を定義し、それとバウンスの積をとることでその価
-  値スコアを最大化する問題として解いていた
-  - しかしこの方法では発見するリンクを動的に決めることはできない
-  - また、評価指標を最大化するような分配方法が最も価値が高いリンクに全
-    測定予算を配ることであり、これは測定精度と資源削減のトレードオフを
-    考えていると言えない
-
-- そこで、これらの問題を解決する評価指標として信頼区間幅UB,LBを用いた
-  評価指標を導入する案を考えた。(UB,LBは推定忠実度の上限、下限)
-- 将来的には信頼区間幅がある閾値 x よりも小さいリンクを何本発見できた
-  かというような指標を考えている
-  
-解くべき問題
-ストーリー
-指標
-
-指標に関しては3章を書いてみせる
-
-
-
-同じような問題を解いている人がいればその手法を使う
-多腕バンディッド、それ以外でも
-
-まずストーリーをアップデートする
-
-オーバーヘッドどうするか
-これに関しては言わないといけない
-忠実度のゆらぎがどの程度でおこるかを考える必要がある
-
-- IEEE ICC 2026 の論文提出締切は 2025年9月29日。
-  - 論文の質はおいといて先に6ページの論文を書く
-  - 何点でもいいから期限には間にあわせる
-
-- 今ある結果、ストーリーで6枚書く
-
-
-
-
-計画
-23くらいまでにストーリーをきめる?
-
-
-逆順で
-
-
-
-29に6ページ(それ以降に校閲依頼)
-先生にはストーリーの校閲依頼
-
-足りないもの
-
-
-ミーティングをこまかく
-週1ではたりない。こまかく短く
-*** 9/26までに絶対必要なものを埋める
-アルゴリズム すぐおわる shun
-アルゴリズムの前提条件 制約条件など、どういう環境を仮定しているか
--> 時間かかりそう shun
-
-
-関連研究(引用した論文) shun
--> 時間かかる
-オーバーヘッドの話(必要不必要関係なく) 証拠集めに時間かかる
--> 証拠集めなしで一旦書く linkselfieの何倍くらいの時間がかかるか
-shun
-
-# 関連研究からやっていく
-# 過去の論文をみて参考にする
-# 15-25こ。1ページ前後
-1章で軽く触れる程度
-
-
-linkselfieに何書いているか読む
-linkselfieの関連研究を読む->まねできるところを探す
-GPTを1から10まで使わない
-論文を読む
-その後みずに書く
-重要度の研究があればみる
-
-追加で必要なところを考える
-
-
-yutoさん
-アブストラクト すぐおわる
-量子ネットワークの説明 すぐおわる
-IEEEiccのフォーマットにする すぐおわる
-関連研究(引用した論文) 整形
-アルゴリズムの前提条件 制約条件など、どういう環境を仮定しているか
--> 時間かかりそう 整形
-アルゴリズム すぐおわる 整形
-まとめと今後の課題 すぐおわる
-
-
-*** 9/29までに絶対に6ページにする
-問題設定の図、実験に使用するトポロジの図
-アルゴリズムの表
-リサーチクエスチョン
-シンボルの定義(あってもなくても)
-
-*** それ以降
-評価指標を新しくした実験
-ストーリーのアップデート
-
-
-
-何をやるべきか
-1 今の論文に書かれていないものを埋める(足りないもの)
-2 6ページにするためにはどうするべきか考える
-.
-.
-.
-3 ストーリー、それに合わせた実験結果をより良いものに変える
-
-
-
-章構成
-1 はじめに
-2 関連研究
-3 アルゴリズムの前提条件
-4 アルゴリズム
-5 実験
-6 まとめと考察
-
-何を伝えたいか
-今回だとストーリー、量子ネットワーク、問題設定、linkselfieとの違い
-アルゴリズムの中身
-
-linkselfieと2,3本他の論文を読む
-
-これらを文量で区切る

+ 0 - 37
add_linkselfie/memo.org~

@@ -1,37 +0,0 @@
-** ミーティング
-*** 対話の引用部分
-ノードS〜ノードD 間のリンク集合 L だけでなく、
-ノードS〜ノードD_n間のリンク集合 L_n (1 <= n <= N, N はノードSの隣接ノード数)がそれぞれ入力される。
-ノードS〜ノードD_n間の重要度 I_n (0〜1 の値) が入力として与えられる。
-総バウンスコスト C が入力として与えられる。
-この時、ノードS〜ノードD_n のノードペアの中で、
-K 個のノードペア (S, D_s_1), (S, D_s_2), ... (S, D_s_K) における
-忠実度が最大のリンクをそれぞれ発見する。
-ただし、発見するリンクのノードペア数 K は、
-重要度と忠実度の積の総和、つまり
-I = \sum_{k = 1}^K I_s_k * F_s_k
-が最大となるように定める。
-ここで F_n は (S, D_n) 間において忠実度の最大のリンクの忠実度である。
-
-A. この拡張問題は、複数のノードペア (S, Dₙ) に対して、限られたリソース(バウンスコスト)の中で、重要なノードペアの
-高忠実度リンクを選定する最適化問題です。以下に、この問題の形式的な定義
-を記述します。
-
-*** 考えたこと
-- 先生のストーリー案に関するchatGPTとの対話をもう1度読みなおした
-- 現状の評価指標では、 **発見するリンクのノードペア数 K** は、重要度と忠
-  実度の積の総和、つまりI =\sum_{k = 1}^K I_s_k * F_s_kが最大となるよ
-  うに定める。という部分の **発見するリンク** の定義をしていない。
-- societyではリンク価値を定義し、それとバウンスの積をとることでその価
-  値スコアを最大化する問題として解いていた
-  - しかしこの方法では発見するリンクを動的に決めることはできない
-  - また、評価指標を最大化するような分配方法が最も価値が高いリンクに全
-    測定予算を配ることであり、これは測定精度と資源削減のトレードオフを
-    考えていると言えない
-
-- そこで、これらの問題を解決する評価指標として信頼区間幅UB,LBを用いた
-  評価指標を導入する案を考えた。(UB,LBは推定忠実度の上限、下限)
-- 将来的には信頼区間幅がある閾値 x よりも小さいリンクを何本発見できた
-  かというような指標を考えている
-  
-  

+ 0 - 158
add_linkselfie/memo.txt

@@ -1,158 +0,0 @@
-linkselfieのストーリー
-
-背景:量子ネットワークでは長距離伝送にエンタングルメントを用いるが、ノイズでリンク品質(忠実度)が劣化するため、送信前にリンク品質の検証(Network Benchmarking)が必要。ただし複数リンクを一様に測るとコスト(バウンス・反復)が大きい。
-
-動機:実運用で使うのは少数の高忠実度リンクだけ。低忠実度リンクを高精度に測るのは無駄なので、悪いリンクを早期に捨て、資源消費を抑えて良いリンクだけを精密化したい(しかもベンチマークはバッチ型で柔軟に1回ずつ引けない)。
-
-目的:最小限の量子資源で最良リンクを高信頼で特定し、その忠実度を十分な
-精度で推定するアルゴリズムの設計・保証・実証。具体的には LinkSelFiE を
-提案し、新しい信頼区間に基づく段階的淘汰で資源削減を図り、正しさとコス
-ト上界を理論立証し、シミュレーションで有効性を示す。
-
-
-
-自分の研究のストーリー
-背景 量子ネットワークにおいて忠実度を高いリンクを効率的に判定する手法 LinkSelFiE が提案されている
-動機 LinkSelFiE は通信需要を考慮していないが、現実には通信需要が高くかつ忠実度の高いリンクの判定が望まれる
-目的 少ない計測 (バウンス) により利用率 x 忠実度が高いリンクの判定を可能とする
-
-
-
-確認
-LinkSelFiEの思想は、悪い候補にはほぼ費やさず、有望候補群にメリハリ配分
-し、必要ならε-最適やTop-Kで止めること。
--> つまり、最良のリンクを特定することよりも、忠実度が上位のリンクに資
-源を割くことが目的?
-読む(関連研究)
-linkselfieを引用している論文があったら読む
-作者の論文
-
-上位のリンク数本を測定するのが大事
-なぜかを調べる
-
-
-
-- 実装予定の評価指標とそのストーリー
-- 精度と測定資源の削減のトレードオフの正解を信頼度deltaによって決める
-
-
-
-
-- 実装予定のベースライン手法
-- uniform-linkselfieよりも少し賢い手法
-  - 全てのリンクをまとめてlinkselfieを適用する手法
-  - 重要度と忠実度の積に対してlinkselfieを行なう
-
-- 信頼度deltaとリンクの本数からノードペア間に割く測定資源の上限を設定
-  - 上限に達するとlinkselfieを打ち切る
-
-
-- 重要度に比例して傾斜的に資源を割りあてる手法
-  - ノードペア間では等しく配分
-
-
-
-目的を考えて答えがでるときと出ないときがある
-実験やったら理解が深まるかも
-両方やる
-
-性能の良さで勝負するのは厳しい
-
-自分の研究の売りは何か
-通信需要を考えた条件を最初に示したのが売り
-
-評価指標はこだわる必要がある
--> だれも評価したことないから
-
-その他の論文を読んだほうがいいか->yes
-読んでから考えたほうがいいか->わからない
-
-ネットワークアーキテクチャの論文も読むべき
-幅広く読むべき、深く読むべき ものによる
-
-どこに使えるか
-
-読者が読んだときに何をインパクトとして受けとるかを考えて書く必要がある
-自分でアピールする
-
--> 通信需要を考えた条件を最初に示したのが売り
-
-ストーリーもそのようにする必要がある
-
-
-
-- linkselfieは通信需要を考慮した環境でも良かった。
-
-
-
-
-
-
-
-理想的な配分が何かを考える必要がある
-
-宛先ノードがいくつかあり、それぞれの通信需要が分かっているときの理想的
-な測定資源の配分の仕方って何?
-
-- まず、入力として何がある?
-  - どれくらいの粒度で測定をするかを表わす、信頼度delta
-  - 真の忠実度のlist
-  - 重要度のlist
-価値レグレット
-使えると判断する基準は?
-linkselfieだと信頼度を満たすと判断するが、ベースラインのvanillaなどは全リンク均等配分するためそのような機能は備わっていない
-
-
-
-
-
-PAC停止
-各ペアiの最良リンクにおいて
-UCB,LCBを求める(信頼区間の上限、下限)
-重要度Iをかけて、
-UB = I_i x UCB_i , LB = I_i x LCB_iを求める
-
-LBが負になる場合はKを自動決定するが、そうはならないので
-実際はUBとLBの差を監視する
-
-Coverage
-
-リンクごとの忠実度の信頼区間の下限LCB_ij
-ノードの重要度I_i
-としたときに
-閾値dを越えるI_i x LCB_ijを何本獲得できたか
-
-
-価値レグレット
-coverageの閾値を考慮しないver
-値としてsum(I_i x LCB_ij)を計算し、真値のsum(I_i x F*_ij)と比較する
-選ばれるリンクはペア間で最大の忠実度のリンクのみ
-
-この3つに言えることだが、ベースラインはどのような手法を想定している?
-
-
-** ミーティング
-ノードS〜ノードD 間のリンク集合 L だけでなく、
-ノードS〜ノードD_n間のリンク集合 L_n (1 <= n <= N, N はノードSの隣接ノード数)がそれぞれ入力される。
-ノードS〜ノードD_n間の重要度 I_n (0〜1 の値) が入力として与えられる。
-総バウンスコスト C が入力として与えられる。
-この時、ノードS〜ノードD_n のノードペアの中で、
-K 個のノードペア (S, D_s_1), (S, D_s_2), ... (S, D_s_K) における
-忠実度が最大のリンクをそれぞれ発見する。
-ただし、発見するリンクのノードペア数 K は、
-重要度と忠実度の積の総和、つまり
-I = \sum_{k = 1}^K I_s_k * F_s_k
-が最大となるように定める。
-ここで F_n は (S, D_n) 間において忠実度の最大のリンクの忠実度である。
-
-A. この拡張問題は、複数のノードペア (S, Dₙ) に対して、限られたリソース(バウンスコスト)の中で、重要なノードペアの
-高忠実度リンクを選定する最適化問題です。以下に、この問題の形式的な定義
-を記述します。
-
-
-
-- 先生のストーリー案に関するchatGPTとの対話をもう1度読みなおした
-- 現状の評価指標では
-発見するリンクのノードペア数 K は、重要度と忠実度の積の総和、つまりI =
-\sum_{k = 1}^K I_s_k * F_s_kが最大となるように定める。
-ん

+ 0 - 108
add_linkselfie/memo.txt~

@@ -1,108 +0,0 @@
-linkselfieのストーリー
-
-背景:量子ネットワークでは長距離伝送にエンタングルメントを用いるが、ノイズでリンク品質(忠実度)が劣化するため、送信前にリンク品質の検証(Network Benchmarking)が必要。ただし複数リンクを一様に測るとコスト(バウンス・反復)が大きい。
-
-動機:実運用で使うのは少数の高忠実度リンクだけ。低忠実度リンクを高精度に測るのは無駄なので、悪いリンクを早期に捨て、資源消費を抑えて良いリンクだけを精密化したい(しかもベンチマークはバッチ型で柔軟に1回ずつ引けない)。
-
-目的:最小限の量子資源で最良リンクを高信頼で特定し、その忠実度を十分な
-精度で推定するアルゴリズムの設計・保証・実証。具体的には LinkSelFiE を
-提案し、新しい信頼区間に基づく段階的淘汰で資源削減を図り、正しさとコス
-ト上界を理論立証し、シミュレーションで有効性を示す。
-
-
-
-自分の研究のストーリー
-背景 量子ネットワークにおいて忠実度を高いリンクを効率的に判定する手法 LinkSelFiE が提案されている
-動機 LinkSelFiE は通信需要を考慮していないが、現実には通信需要が高くかつ忠実度の高いリンクの判定が望まれる
-目的 少ない計測 (バウンス) により利用率 x 忠実度が高いリンクの判定を可能とする
-
-
-
-確認
-LinkSelFiEの思想は、悪い候補にはほぼ費やさず、有望候補群にメリハリ配分
-し、必要ならε-最適やTop-Kで止めること。
--> つまり、最良のリンクを特定することよりも、忠実度が上位のリンクに資
-源を割くことが目的?
-読む(関連研究)
-linkselfieを引用している論文があったら読む
-作者の論文
-
-上位のリンク数本を測定するのが大事
-なぜかを調べる
-
-
-
-- 実装予定の評価指標とそのストーリー
-- 精度と測定資源の削減のトレードオフの正解を信頼度deltaによって決める
-
-
-
-
-- 実装予定のベースライン手法
-- uniform-linkselfieよりも少し賢い手法
-  - 全てのリンクをまとめてlinkselfieを適用する手法
-  - 重要度と忠実度の積に対してlinkselfieを行なう
-
-- 信頼度deltaとリンクの本数からノードペア間に割く測定資源の上限を設定
-  - 上限に達するとlinkselfieを打ち切る
-
-
-- 重要度に比例して傾斜的に資源を割りあてる手法
-  - ノードペア間では等しく配分
-
-
-
-目的を考えて答えがでるときと出ないときがある
-実験やったら理解が深まるかも
-両方やる
-
-性能の良さで勝負するのは厳しい
-
-自分の研究の売りは何か
-通信需要を考えた条件を最初に示したのが売り
-
-評価指標はこだわる必要がある
--> だれも評価したことないから
-
-その他の論文を読んだほうがいいか->yes
-読んでから考えたほうがいいか->わからない
-
-ネットワークアーキテクチャの論文も読むべき
-幅広く読むべき、深く読むべき ものによる
-
-どこに使えるか
-
-読者が読んだときに何をインパクトとして受けとるかを考えて書く必要がある
-自分でアピールする
-
--> 通信需要を考えた条件を最初に示したのが売り
-
-ストーリーもそのようにする必要がある
-
-
-
-- linkselfieは通信需要を考慮した環境でも良かった。
-
-
-
-
-
-
-
-理想的な配分が何かを考える必要がある
-
-宛先ノードがいくつかあり、それぞれの通信需要が分かっているときの理想的
-な測定資源の配分の仕方って何?
-
-- まず、入力として何がある?
-  - どれくらいの粒度で測定をするかを表わす、信頼度delta
-  - 真の忠実度のlist
-  - 重要度のlist
-価値レグレット
-使えると判断する基準は?
-linkselfieだと信頼度を満たすと判断するが、ベースラインのvanillaなどは全リンク均等配分するためそのような機能は備わっていない
-
-価値レグレット
-PAC停止
-Coverage
-この3つに言えることだが、ベースラインはどのような手法を想定している?

BIN
add_linkselfie/outputs/plot_accuracy_vs_budget_Depolar.pickle


BIN
add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean.pickle


+ 0 - 361
add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean_gaps.csv

@@ -1,361 +0,0 @@
-scheduler,budget,trial_idx,gap
-LNaive,3000,0,-0.11525232662572615
-LNaive,3000,1,-0.1162814635310172
-LNaive,3000,2,-0.11551019498327286
-LNaive,3000,3,-0.11578460042548988
-LNaive,3000,4,-0.1168991513304547
-LNaive,3000,5,-0.11474698114272386
-LNaive,3000,6,-0.1149732776703486
-LNaive,3000,7,-0.11275637404632133
-LNaive,3000,8,-0.11624163708121649
-LNaive,3000,9,-0.11386484800630003
-LNaive,3000,10,-0.1141671071919208
-LNaive,3000,11,-0.11651576074743486
-LNaive,3000,12,-0.11533031241959402
-LNaive,3000,13,-0.11406256440593054
-LNaive,3000,14,-0.11511433446313446
-LNaive,3000,15,-0.11491153484099459
-LNaive,3000,16,-0.11517702893674042
-LNaive,3000,17,-0.115155554276961
-LNaive,3000,18,-0.11647905932954128
-LNaive,3000,19,-0.11332103593833731
-LNaive,3000,20,-0.11477871976194176
-LNaive,3000,21,-0.11528005898928106
-LNaive,3000,22,-0.11521974859314899
-LNaive,3000,23,-0.11526682916739273
-LNaive,3000,24,-0.11483393167587086
-LNaive,3000,25,-0.11604973240066618
-LNaive,3000,26,-0.11601627167916018
-LNaive,3000,27,-0.114985013454029
-LNaive,3000,28,-0.11336483428764055
-LNaive,3000,29,-0.11411357085127172
-LNaive,6000,0,-0.0841005360323932
-LNaive,6000,1,-0.08621707049238236
-LNaive,6000,2,-0.08424618665895633
-LNaive,6000,3,-0.0846580036392438
-LNaive,6000,4,-0.08515169675730139
-LNaive,6000,5,-0.08412673466135523
-LNaive,6000,6,-0.08414399156347752
-LNaive,6000,7,-0.08563398710270842
-LNaive,6000,8,-0.08557804900585941
-LNaive,6000,9,-0.08592465761716472
-LNaive,6000,10,-0.08516015640535635
-LNaive,6000,11,-0.08493867000214128
-LNaive,6000,12,-0.0834270604883891
-LNaive,6000,13,-0.08534575371117525
-LNaive,6000,14,-0.08588272535691965
-LNaive,6000,15,-0.08566092504444123
-LNaive,6000,16,-0.08546618602363454
-LNaive,6000,17,-0.08472149074039848
-LNaive,6000,18,-0.08390470306169406
-LNaive,6000,19,-0.08592953990866403
-LNaive,6000,20,-0.0862950470474011
-LNaive,6000,21,-0.08360568447223948
-LNaive,6000,22,-0.08549599663335405
-LNaive,6000,23,-0.08626749428211888
-LNaive,6000,24,-0.08522178341432995
-LNaive,6000,25,-0.0853256400320902
-LNaive,6000,26,-0.08643554782011542
-LNaive,6000,27,-0.08794586367634438
-LNaive,6000,28,-0.08469812816726541
-LNaive,6000,29,-0.08400907490427356
-LNaive,9000,0,-0.06706930005792333
-LNaive,9000,1,-0.06719176031548779
-LNaive,9000,2,-0.0684709438202904
-LNaive,9000,3,-0.06526765680635682
-LNaive,9000,4,-0.06597777344512967
-LNaive,9000,5,-0.0690357695007413
-LNaive,9000,6,-0.06666184166686062
-LNaive,9000,7,-0.06727799111214816
-LNaive,9000,8,-0.06683021786656196
-LNaive,9000,9,-0.0675058540768938
-LNaive,9000,10,-0.06762111961691641
-LNaive,9000,11,-0.06760298351413008
-LNaive,9000,12,-0.0684646825918297
-LNaive,9000,13,-0.06785821136729331
-LNaive,9000,14,-0.06630217842030295
-LNaive,9000,15,-0.0671758398499519
-LNaive,9000,16,-0.06720014082133308
-LNaive,9000,17,-0.07061517634833425
-LNaive,9000,18,-0.06591584405708872
-LNaive,9000,19,-0.06552229393064446
-LNaive,9000,20,-0.06964788160131419
-LNaive,9000,21,-0.06770778314660286
-LNaive,9000,22,-0.06715073505593405
-LNaive,9000,23,-0.0702255609532243
-LNaive,9000,24,-0.06591741798791095
-LNaive,9000,25,-0.0676873643746464
-LNaive,9000,26,-0.06831537974291146
-LNaive,9000,27,-0.06584309963344004
-LNaive,9000,28,-0.06671158189922122
-LNaive,9000,29,-0.06761545160257132
-LNaive,12000,0,-0.058544923313416874
-LNaive,12000,1,-0.0564324189099501
-LNaive,12000,2,-0.05570096070632857
-LNaive,12000,3,-0.05701798378695777
-LNaive,12000,4,-0.059127581879105495
-LNaive,12000,5,-0.05753998376584213
-LNaive,12000,6,-0.055641875480117675
-LNaive,12000,7,-0.056243623653545405
-LNaive,12000,8,-0.0564885341703798
-LNaive,12000,9,-0.05571491508741133
-LNaive,12000,10,-0.05650324486447911
-LNaive,12000,11,-0.055944163300091665
-LNaive,12000,12,-0.05556887076565664
-LNaive,12000,13,-0.0552965184267189
-LNaive,12000,14,-0.057779288869098444
-LNaive,12000,15,-0.05812085236132314
-LNaive,12000,16,-0.056774091781738534
-LNaive,12000,17,-0.0572377077723768
-LNaive,12000,18,-0.05688329776379164
-LNaive,12000,19,-0.05675254283776998
-LNaive,12000,20,-0.05832459128184264
-LNaive,12000,21,-0.056173721560785594
-LNaive,12000,22,-0.05603652110529811
-LNaive,12000,23,-0.05755345005175694
-LNaive,12000,24,-0.057432033501015556
-LNaive,12000,25,-0.05756115349741153
-LNaive,12000,26,-0.05678748000019085
-LNaive,12000,27,-0.05701627416768262
-LNaive,12000,28,-0.057073974994542476
-LNaive,12000,29,-0.056437265574215245
-LNaive,15000,0,-0.047672376362878466
-LNaive,15000,1,-0.04800210022407547
-LNaive,15000,2,-0.048009178582940515
-LNaive,15000,3,-0.0483873824271015
-LNaive,15000,4,-0.04767025796132285
-LNaive,15000,5,-0.04840726885609048
-LNaive,15000,6,-0.04824012171791159
-LNaive,15000,7,-0.04762405459173136
-LNaive,15000,8,-0.04569789757979936
-LNaive,15000,9,-0.04884641068564055
-LNaive,15000,10,-0.046753112035364364
-LNaive,15000,11,-0.049306713662000456
-LNaive,15000,12,-0.04749899609083441
-LNaive,15000,13,-0.04933460452722516
-LNaive,15000,14,-0.04809651975305984
-LNaive,15000,15,-0.048866158389188286
-LNaive,15000,16,-0.048927504785606146
-LNaive,15000,17,-0.048297909005402095
-LNaive,15000,18,-0.047648622195405776
-LNaive,15000,19,-0.0484728125748366
-LNaive,15000,20,-0.048318519485654576
-LNaive,15000,21,-0.04640452480886925
-LNaive,15000,22,-0.04842256683320367
-LNaive,15000,23,-0.04766644195600522
-LNaive,15000,24,-0.047751177164039094
-LNaive,15000,25,-0.04758827844113278
-LNaive,15000,26,-0.047939594991386336
-LNaive,15000,27,-0.04779434446450437
-LNaive,15000,28,-0.04802690095298723
-LNaive,15000,29,-0.04807078261011166
-LNaive,18000,0,-0.032853286748915744
-LNaive,18000,1,-0.03418379525230364
-LNaive,18000,2,-0.03394494499343936
-LNaive,18000,3,-0.033421307790617694
-LNaive,18000,4,-0.03358032910982034
-LNaive,18000,5,-0.03382791801647267
-LNaive,18000,6,-0.03405689568888248
-LNaive,18000,7,-0.034103410294123004
-LNaive,18000,8,-0.0340902131841454
-LNaive,18000,9,-0.03337322388675612
-LNaive,18000,10,-0.032826055889372374
-LNaive,18000,11,-0.03420217061564934
-LNaive,18000,12,-0.03430192498654694
-LNaive,18000,13,-0.033503860045569045
-LNaive,18000,14,-0.033743993642237236
-LNaive,18000,15,-0.034135627731535445
-LNaive,18000,16,-0.03382506984465916
-LNaive,18000,17,-0.03394311674167716
-LNaive,18000,18,-0.033849207773852696
-LNaive,18000,19,-0.03340023513988866
-LNaive,18000,20,-0.034720460160830524
-LNaive,18000,21,-0.03317398784244974
-LNaive,18000,22,-0.0332714915853215
-LNaive,18000,23,-0.03336753496022027
-LNaive,18000,24,-0.032926973808218185
-LNaive,18000,25,-0.03387107645633225
-LNaive,18000,26,-0.03468662898161545
-LNaive,18000,27,-0.03365161711823117
-LNaive,18000,28,-0.03435181982281277
-LNaive,18000,29,-0.034640462016360396
-Greedy,3000,0,-0.13972885159784498
-Greedy,3000,1,-0.14034605332069394
-Greedy,3000,2,-0.14222176541377274
-Greedy,3000,3,-0.13730469489094288
-Greedy,3000,4,-0.14195098410593676
-Greedy,3000,5,-0.13841492950744827
-Greedy,3000,6,-0.1417665025440915
-Greedy,3000,7,-0.13798968728963912
-Greedy,3000,8,-0.13913067608052476
-Greedy,3000,9,-0.1386051693112944
-Greedy,3000,10,-0.13894445652895393
-Greedy,3000,11,-0.1405976825143198
-Greedy,3000,12,-0.13791351102737792
-Greedy,3000,13,-0.14147759957365036
-Greedy,3000,14,-0.13723457652718618
-Greedy,3000,15,-0.14087892350280573
-Greedy,3000,16,-0.14322930090272812
-Greedy,3000,17,-0.13968190601225539
-Greedy,3000,18,-0.14232023409252026
-Greedy,3000,19,-0.14175713921761934
-Greedy,3000,20,-0.11712853789760669
-Greedy,3000,21,-0.1375519374580083
-Greedy,3000,22,-0.13838329388443016
-Greedy,3000,23,-0.13973747276280013
-Greedy,3000,24,-0.1412203266764549
-Greedy,3000,25,-0.1405011383854513
-Greedy,3000,26,-0.13988077948611188
-Greedy,3000,27,-0.14203808226222536
-Greedy,3000,28,-0.13950840235603978
-Greedy,3000,29,-0.14128578764165245
-Greedy,6000,0,-0.11153394389339488
-Greedy,6000,1,-0.11396050551986503
-Greedy,6000,2,-0.11508176648113
-Greedy,6000,3,-0.11078939040912561
-Greedy,6000,4,-0.14669178929687576
-Greedy,6000,5,-0.11466077429821564
-Greedy,6000,6,-0.11206348125757748
-Greedy,6000,7,-0.1122377469457726
-Greedy,6000,8,-0.11266616908974758
-Greedy,6000,9,-0.11094962484282556
-Greedy,6000,10,-0.11012340547175625
-Greedy,6000,11,-0.11268742908310891
-Greedy,6000,12,-0.11134789633012943
-Greedy,6000,13,-0.11275115642668665
-Greedy,6000,14,-0.11117280853820655
-Greedy,6000,15,-0.1106404490516778
-Greedy,6000,16,-0.11278521398689756
-Greedy,6000,17,-0.1117494280602862
-Greedy,6000,18,-0.11267254080859734
-Greedy,6000,19,-0.11552334361393146
-Greedy,6000,20,-0.11226041004876319
-Greedy,6000,21,-0.11285737629616
-Greedy,6000,22,-0.11463586699809913
-Greedy,6000,23,-0.11254464777146078
-Greedy,6000,24,-0.11315247285159957
-Greedy,6000,25,-0.1126776845576275
-Greedy,6000,26,-0.11304603368735144
-Greedy,6000,27,-0.1135223163960758
-Greedy,6000,28,-0.11184227872505503
-Greedy,6000,29,-0.11241831040780104
-Greedy,9000,0,-0.05635942914212411
-Greedy,9000,1,-0.12871584117599266
-Greedy,9000,2,-0.12358776461761789
-Greedy,9000,3,-0.123588955150426
-Greedy,9000,4,-0.1249622273437897
-Greedy,9000,5,-0.11909401523497332
-Greedy,9000,6,-0.12449521799636443
-Greedy,9000,7,-0.11201844953877593
-Greedy,9000,8,-0.13173879309935765
-Greedy,9000,9,-0.05923231702737575
-Greedy,9000,10,-0.1240797682219108
-Greedy,9000,11,-0.11650005548961129
-Greedy,9000,12,-0.12747199970455636
-Greedy,9000,13,-0.1276055034320146
-Greedy,9000,14,-0.16188917715111983
-Greedy,9000,15,-0.12525166086280348
-Greedy,9000,16,-0.15828018873202465
-Greedy,9000,17,-0.1175759938812947
-Greedy,9000,18,-0.12833326014515456
-Greedy,9000,19,-0.054764187845526724
-Greedy,9000,20,-0.12607590425629844
-Greedy,9000,21,-0.1272488448096214
-Greedy,9000,22,-0.16147048361380278
-Greedy,9000,23,-0.12810088932015862
-Greedy,9000,24,-0.124236503051245
-Greedy,9000,25,-0.05553219269360121
-Greedy,9000,26,-0.11806766743196473
-Greedy,9000,27,-0.12446542042103581
-Greedy,9000,28,-0.12328559363120684
-Greedy,9000,29,-0.1111795893215336
-Greedy,12000,0,-0.1930888148248897
-Greedy,12000,1,-0.05998076175176803
-Greedy,12000,2,-0.059750513529616356
-Greedy,12000,3,-0.06187789014393785
-Greedy,12000,4,-0.06350897176162162
-Greedy,12000,5,-0.10505559495463734
-Greedy,12000,6,-0.13707939871625952
-Greedy,12000,7,-0.057487326605518585
-Greedy,12000,8,-0.06064680216385787
-Greedy,12000,9,-0.05899421010565287
-Greedy,12000,10,-0.05726160492011312
-Greedy,12000,11,-0.0604986161014357
-Greedy,12000,12,-0.06011962363584489
-Greedy,12000,13,-0.1908792022205701
-Greedy,12000,14,-0.19311673887293423
-Greedy,12000,15,-0.19447442947315374
-Greedy,12000,16,-0.059453330440368046
-Greedy,12000,17,-0.0583286002289467
-Greedy,12000,18,-0.10331965689820344
-Greedy,12000,19,-0.0587044736478648
-Greedy,12000,20,-0.06400022446562215
-Greedy,12000,21,-0.19327415600277176
-Greedy,12000,22,-0.19199589793696425
-Greedy,12000,23,-0.05935788373404394
-Greedy,12000,24,-0.1377102064404213
-Greedy,12000,25,-0.1966478918984882
-Greedy,12000,26,-0.05961027885731929
-Greedy,12000,27,-0.057459771157465256
-Greedy,12000,28,-0.19290196548834726
-Greedy,12000,29,-0.06263305432051758
-Greedy,15000,0,-0.23126025208020984
-Greedy,15000,1,-0.23492341367629344
-Greedy,15000,2,-0.23021164465557353
-Greedy,15000,3,-0.23673855644032638
-Greedy,15000,4,-0.23536371927650213
-Greedy,15000,5,-0.23712638077406467
-Greedy,15000,6,-0.2330413695876632
-Greedy,15000,7,-0.2362474377319943
-Greedy,15000,8,-0.2369419891606781
-Greedy,15000,9,-0.2362920807243145
-Greedy,15000,10,-0.23071436538951495
-Greedy,15000,11,-0.23268439569417043
-Greedy,15000,12,-0.2338379985460275
-Greedy,15000,13,-0.23126249685987554
-Greedy,15000,14,-0.23671306044504714
-Greedy,15000,15,-0.23476257525954303
-Greedy,15000,16,-0.23751241843630666
-Greedy,15000,17,-0.23515862596671222
-Greedy,15000,18,-0.23410215652451627
-Greedy,15000,19,-0.2351207579382888
-Greedy,15000,20,-0.23537082565950773
-Greedy,15000,21,-0.2288632004206177
-Greedy,15000,22,-0.2352139854779185
-Greedy,15000,23,-0.23474802670120054
-Greedy,15000,24,-0.2325459870309965
-Greedy,15000,25,-0.23047709382312642
-Greedy,15000,26,-0.2325880923804713
-Greedy,15000,27,-0.23360252919336544
-Greedy,15000,28,-0.2317248261301279
-Greedy,15000,29,-0.23513847633271578
-Greedy,18000,0,-0.050689208924951434
-Greedy,18000,1,-0.056101789460940465
-Greedy,18000,2,-0.05529292106849526
-Greedy,18000,3,-0.05391095888297415
-Greedy,18000,4,-0.05527537706978358
-Greedy,18000,5,-0.052915763891636325
-Greedy,18000,6,-0.0558971866029323
-Greedy,18000,7,-0.05703460124299209
-Greedy,18000,8,-0.05288260392024191
-Greedy,18000,9,-0.05093272799809889
-Greedy,18000,10,-0.05263941352796553
-Greedy,18000,11,-0.051274958729431885
-Greedy,18000,12,-0.056588230393473604
-Greedy,18000,13,-0.0546173629584874
-Greedy,18000,14,-0.054917809480469204
-Greedy,18000,15,-0.05488966386120864
-Greedy,18000,16,-0.05468005286249589
-Greedy,18000,17,-0.05564583177657112
-Greedy,18000,18,-0.052896441173686415
-Greedy,18000,19,-0.0520832701538575
-Greedy,18000,20,-0.05656531040640855
-Greedy,18000,21,-0.055960760962019185
-Greedy,18000,22,-0.054168852935967626
-Greedy,18000,23,-0.0539751839138527
-Greedy,18000,24,-0.054601688810618554
-Greedy,18000,25,-0.05212513798841467
-Greedy,18000,26,-0.052075793093082634
-Greedy,18000,27,-0.050152668070174045
-Greedy,18000,28,-0.054503667998331506
-Greedy,18000,29,-0.05424090125946668

+ 0 - 361
add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean_widths.csv

@@ -1,361 +0,0 @@
-scheduler,budget,trial_idx,width
-LNaive,3000,0,0.11525232662572615
-LNaive,3000,1,0.1162814635310172
-LNaive,3000,2,0.11551019498327286
-LNaive,3000,3,0.11578460042548988
-LNaive,3000,4,0.1168991513304547
-LNaive,3000,5,0.1159610122199981
-LNaive,3000,6,0.11528069252961315
-LNaive,3000,7,0.11636592549352875
-LNaive,3000,8,0.11624163708121649
-LNaive,3000,9,0.11449053417658206
-LNaive,3000,10,0.1141671071919208
-LNaive,3000,11,0.11651576074743486
-LNaive,3000,12,0.11533031241959402
-LNaive,3000,13,0.11406256440593054
-LNaive,3000,14,0.11511433446313446
-LNaive,3000,15,0.1164360723048079
-LNaive,3000,16,0.11517702893674042
-LNaive,3000,17,0.115155554276961
-LNaive,3000,18,0.11647905932954128
-LNaive,3000,19,0.11526186523733606
-LNaive,3000,20,0.11477871976194176
-LNaive,3000,21,0.11528005898928106
-LNaive,3000,22,0.11521974859314899
-LNaive,3000,23,0.11526682916739273
-LNaive,3000,24,0.11530298686088636
-LNaive,3000,25,0.11604973240066618
-LNaive,3000,26,0.11688591154770078
-LNaive,3000,27,0.11564027166577495
-LNaive,3000,28,0.11336483428764055
-LNaive,3000,29,0.11538531644334404
-LNaive,6000,0,0.10932759623644239
-LNaive,6000,1,0.10846219128654844
-LNaive,6000,2,0.10897221189428774
-LNaive,6000,3,0.10909635266041495
-LNaive,6000,4,0.10896701313832002
-LNaive,6000,5,0.10865726786762431
-LNaive,6000,6,0.10838102933703975
-LNaive,6000,7,0.108597832139954
-LNaive,6000,8,0.10839571145855553
-LNaive,6000,9,0.10820103362767575
-LNaive,6000,10,0.10891781146742585
-LNaive,6000,11,0.1084302744714234
-LNaive,6000,12,0.10849499468914237
-LNaive,6000,13,0.1073518082503625
-LNaive,6000,14,0.10783214060939916
-LNaive,6000,15,0.10987542793703486
-LNaive,6000,16,0.10861012707864992
-LNaive,6000,17,0.10951492949340169
-LNaive,6000,18,0.11076623903497973
-LNaive,6000,19,0.10990138247773285
-LNaive,6000,20,0.1103045678726231
-LNaive,6000,21,0.11029383118283698
-LNaive,6000,22,0.10924568244691457
-LNaive,6000,23,0.10895468665177876
-LNaive,6000,24,0.1089340417161424
-LNaive,6000,25,0.10845268614982795
-LNaive,6000,26,0.10845176967150016
-LNaive,6000,27,0.10977908439011008
-LNaive,6000,28,0.10963331814710442
-LNaive,6000,29,0.1092650535674955
-LNaive,9000,0,0.0999288459113783
-LNaive,9000,1,0.0999288459113783
-LNaive,9000,2,0.0999288459113783
-LNaive,9000,3,0.0999288459113783
-LNaive,9000,4,0.0999288459113783
-LNaive,9000,5,0.0999288459113783
-LNaive,9000,6,0.0999288459113783
-LNaive,9000,7,0.0999288459113783
-LNaive,9000,8,0.0999288459113783
-LNaive,9000,9,0.0999288459113783
-LNaive,9000,10,0.0999288459113783
-LNaive,9000,11,0.0999288459113783
-LNaive,9000,12,0.0999288459113783
-LNaive,9000,13,0.0999288459113783
-LNaive,9000,14,0.0999288459113783
-LNaive,9000,15,0.0999288459113783
-LNaive,9000,16,0.0999288459113783
-LNaive,9000,17,0.0999288459113783
-LNaive,9000,18,0.0999288459113783
-LNaive,9000,19,0.0999288459113783
-LNaive,9000,20,0.0999288459113783
-LNaive,9000,21,0.0999288459113783
-LNaive,9000,22,0.0999288459113783
-LNaive,9000,23,0.0999288459113783
-LNaive,9000,24,0.0999288459113783
-LNaive,9000,25,0.0999288459113783
-LNaive,9000,26,0.0999288459113783
-LNaive,9000,27,0.0999288459113783
-LNaive,9000,28,0.0999288459113783
-LNaive,9000,29,0.0999288459113783
-LNaive,12000,0,0.0865409191301143
-LNaive,12000,1,0.0865409191301143
-LNaive,12000,2,0.0865409191301143
-LNaive,12000,3,0.0865409191301143
-LNaive,12000,4,0.0865409191301143
-LNaive,12000,5,0.0865409191301143
-LNaive,12000,6,0.0865409191301143
-LNaive,12000,7,0.0865409191301143
-LNaive,12000,8,0.0865409191301143
-LNaive,12000,9,0.0865409191301143
-LNaive,12000,10,0.0865409191301143
-LNaive,12000,11,0.0865409191301143
-LNaive,12000,12,0.0865409191301143
-LNaive,12000,13,0.0865409191301143
-LNaive,12000,14,0.0865409191301143
-LNaive,12000,15,0.0865409191301143
-LNaive,12000,16,0.0865409191301143
-LNaive,12000,17,0.0865409191301143
-LNaive,12000,18,0.0865409191301143
-LNaive,12000,19,0.0865409191301143
-LNaive,12000,20,0.0865409191301143
-LNaive,12000,21,0.0865409191301143
-LNaive,12000,22,0.0865409191301143
-LNaive,12000,23,0.0865409191301143
-LNaive,12000,24,0.0865409191301143
-LNaive,12000,25,0.0865409191301143
-LNaive,12000,26,0.0865409191301143
-LNaive,12000,27,0.0865409191301143
-LNaive,12000,28,0.0865409191301143
-LNaive,12000,29,0.0865409191301143
-LNaive,15000,0,0.07740455120409906
-LNaive,15000,1,0.07740455120409906
-LNaive,15000,2,0.07740455120409906
-LNaive,15000,3,0.07740455120409906
-LNaive,15000,4,0.07740455120409906
-LNaive,15000,5,0.07740455120409906
-LNaive,15000,6,0.07740455120409906
-LNaive,15000,7,0.07740455120409906
-LNaive,15000,8,0.07740455120409906
-LNaive,15000,9,0.07740455120409906
-LNaive,15000,10,0.07740455120409906
-LNaive,15000,11,0.07740455120409906
-LNaive,15000,12,0.07740455120409906
-LNaive,15000,13,0.07740455120409906
-LNaive,15000,14,0.07740455120409906
-LNaive,15000,15,0.07740455120409906
-LNaive,15000,16,0.07740455120409906
-LNaive,15000,17,0.07740455120409906
-LNaive,15000,18,0.07740455120409906
-LNaive,15000,19,0.07740455120409906
-LNaive,15000,20,0.07740455120409906
-LNaive,15000,21,0.07740455120409906
-LNaive,15000,22,0.07740455120409906
-LNaive,15000,23,0.07740455120409906
-LNaive,15000,24,0.07740455120409906
-LNaive,15000,25,0.07740455120409906
-LNaive,15000,26,0.07740455120409906
-LNaive,15000,27,0.07740455120409906
-LNaive,15000,28,0.07740455120409906
-LNaive,15000,29,0.07740455120409906
-LNaive,18000,0,0.07066036458008118
-LNaive,18000,1,0.07066036458008118
-LNaive,18000,2,0.07066036458008118
-LNaive,18000,3,0.07066036458008118
-LNaive,18000,4,0.07066036458008118
-LNaive,18000,5,0.07066036458008118
-LNaive,18000,6,0.07066036458008118
-LNaive,18000,7,0.07066036458008118
-LNaive,18000,8,0.07066036458008118
-LNaive,18000,9,0.07066036458008118
-LNaive,18000,10,0.07066036458008118
-LNaive,18000,11,0.07066036458008118
-LNaive,18000,12,0.07066036458008118
-LNaive,18000,13,0.07066036458008118
-LNaive,18000,14,0.07066036458008118
-LNaive,18000,15,0.07066036458008118
-LNaive,18000,16,0.07066036458008118
-LNaive,18000,17,0.07066036458008118
-LNaive,18000,18,0.07066036458008118
-LNaive,18000,19,0.07066036458008118
-LNaive,18000,20,0.07066036458008118
-LNaive,18000,21,0.07066036458008118
-LNaive,18000,22,0.07066036458008118
-LNaive,18000,23,0.07066036458008118
-LNaive,18000,24,0.07066036458008118
-LNaive,18000,25,0.07066036458008118
-LNaive,18000,26,0.07066036458008118
-LNaive,18000,27,0.07066036458008118
-LNaive,18000,28,0.07066036458008118
-LNaive,18000,29,0.07066036458008118
-Greedy,3000,0,0.13972885159784498
-Greedy,3000,1,0.14034605332069394
-Greedy,3000,2,0.14222176541377274
-Greedy,3000,3,0.13730469489094288
-Greedy,3000,4,0.14195098410593676
-Greedy,3000,5,0.13841492950744827
-Greedy,3000,6,0.1417665025440915
-Greedy,3000,7,0.13798968728963912
-Greedy,3000,8,0.13913067608052476
-Greedy,3000,9,0.1386051693112944
-Greedy,3000,10,0.13894445652895393
-Greedy,3000,11,0.1405976825143198
-Greedy,3000,12,0.13791351102737792
-Greedy,3000,13,0.14147759957365036
-Greedy,3000,14,0.13723457652718618
-Greedy,3000,15,0.14087892350280573
-Greedy,3000,16,0.14322930090272812
-Greedy,3000,17,0.13968190601225539
-Greedy,3000,18,0.14232023409252026
-Greedy,3000,19,0.14175713921761934
-Greedy,3000,20,0.1985449814889242
-Greedy,3000,21,0.1375519374580083
-Greedy,3000,22,0.13838329388443016
-Greedy,3000,23,0.13973747276280013
-Greedy,3000,24,0.1412203266764549
-Greedy,3000,25,0.1405011383854513
-Greedy,3000,26,0.13988077948611188
-Greedy,3000,27,0.14203808226222536
-Greedy,3000,28,0.13950840235603978
-Greedy,3000,29,0.14128578764165245
-Greedy,6000,0,0.11153394389339488
-Greedy,6000,1,0.11396050551986503
-Greedy,6000,2,0.11508176648113
-Greedy,6000,3,0.11078939040912561
-Greedy,6000,4,0.14669178929687576
-Greedy,6000,5,0.11466077429821564
-Greedy,6000,6,0.11206348125757748
-Greedy,6000,7,0.1122377469457726
-Greedy,6000,8,0.11266616908974758
-Greedy,6000,9,0.11094962484282556
-Greedy,6000,10,0.11012340547175625
-Greedy,6000,11,0.11268742908310891
-Greedy,6000,12,0.11134789633012943
-Greedy,6000,13,0.11275115642668665
-Greedy,6000,14,0.11117280853820655
-Greedy,6000,15,0.1106404490516778
-Greedy,6000,16,0.11278521398689756
-Greedy,6000,17,0.1117494280602862
-Greedy,6000,18,0.11267254080859734
-Greedy,6000,19,0.11552334361393146
-Greedy,6000,20,0.11226041004876319
-Greedy,6000,21,0.11285737629616
-Greedy,6000,22,0.11463586699809913
-Greedy,6000,23,0.11254464777146078
-Greedy,6000,24,0.11315247285159957
-Greedy,6000,25,0.1126776845576275
-Greedy,6000,26,0.11304603368735144
-Greedy,6000,27,0.1135223163960758
-Greedy,6000,28,0.11184227872505503
-Greedy,6000,29,0.11241831040780104
-Greedy,9000,0,0.08518162692547071
-Greedy,9000,1,0.12871584117599266
-Greedy,9000,2,0.12586602169965377
-Greedy,9000,3,0.12598361147119053
-Greedy,9000,4,0.1249622273437897
-Greedy,9000,5,0.126116243821207
-Greedy,9000,6,0.12567986330128278
-Greedy,9000,7,0.12707342031390922
-Greedy,9000,8,0.13173879309935765
-Greedy,9000,9,0.08589341533195305
-Greedy,9000,10,0.12408435513252403
-Greedy,9000,11,0.12501834788990362
-Greedy,9000,12,0.12747199970455636
-Greedy,9000,13,0.1276055034320146
-Greedy,9000,14,0.16188917715111983
-Greedy,9000,15,0.12525166086280348
-Greedy,9000,16,0.16098308686124574
-Greedy,9000,17,0.12685358753612241
-Greedy,9000,18,0.12833326014515456
-Greedy,9000,19,0.08569848270542912
-Greedy,9000,20,0.12607590425629844
-Greedy,9000,21,0.1289058505101922
-Greedy,9000,22,0.16147048361380278
-Greedy,9000,23,0.12879991574980443
-Greedy,9000,24,0.124236503051245
-Greedy,9000,25,0.08585390897464806
-Greedy,9000,26,0.12701857296605257
-Greedy,9000,27,0.1262037498186791
-Greedy,9000,28,0.12328559363120684
-Greedy,9000,29,0.12528904997230494
-Greedy,12000,0,0.1930888148248897
-Greedy,12000,1,0.3155130837249338
-Greedy,12000,2,0.30804036376375343
-Greedy,12000,3,0.3018982739337064
-Greedy,12000,4,0.3084790118002494
-Greedy,12000,5,0.11804083395861986
-Greedy,12000,6,0.13707939871625952
-Greedy,12000,7,0.30202517837753096
-Greedy,12000,8,0.31827555729594437
-Greedy,12000,9,0.3038793945275169
-Greedy,12000,10,0.3136640242651634
-Greedy,12000,11,0.3094809254256423
-Greedy,12000,12,0.3077201806906986
-Greedy,12000,13,0.1908792022205701
-Greedy,12000,14,0.19311673887293423
-Greedy,12000,15,0.19447442947315374
-Greedy,12000,16,0.3134409816178185
-Greedy,12000,17,0.3088134791827799
-Greedy,12000,18,0.11804083395861986
-Greedy,12000,19,0.3090275838487564
-Greedy,12000,20,0.31573471555147936
-Greedy,12000,21,0.19327415600277176
-Greedy,12000,22,0.19199589793696425
-Greedy,12000,23,0.30794807475060937
-Greedy,12000,24,0.1377102064404213
-Greedy,12000,25,0.1966478918984882
-Greedy,12000,26,0.3134751287471791
-Greedy,12000,27,0.30838522826550996
-Greedy,12000,28,0.19290196548834726
-Greedy,12000,29,0.30968852314951323
-Greedy,15000,0,0.23126025208020984
-Greedy,15000,1,0.23492341367629344
-Greedy,15000,2,0.23021164465557353
-Greedy,15000,3,0.23673855644032638
-Greedy,15000,4,0.23536371927650213
-Greedy,15000,5,0.23712638077406467
-Greedy,15000,6,0.2330413695876632
-Greedy,15000,7,0.2362474377319943
-Greedy,15000,8,0.2369419891606781
-Greedy,15000,9,0.2362920807243145
-Greedy,15000,10,0.23071436538951495
-Greedy,15000,11,0.23268439569417043
-Greedy,15000,12,0.2338379985460275
-Greedy,15000,13,0.23126249685987554
-Greedy,15000,14,0.23671306044504714
-Greedy,15000,15,0.23476257525954303
-Greedy,15000,16,0.23751241843630666
-Greedy,15000,17,0.23515862596671222
-Greedy,15000,18,0.23410215652451627
-Greedy,15000,19,0.2351207579382888
-Greedy,15000,20,0.23537082565950773
-Greedy,15000,21,0.2288632004206177
-Greedy,15000,22,0.2352139854779185
-Greedy,15000,23,0.23474802670120054
-Greedy,15000,24,0.2325459870309965
-Greedy,15000,25,0.23047709382312642
-Greedy,15000,26,0.2325880923804713
-Greedy,15000,27,0.23360252919336544
-Greedy,15000,28,0.2317248261301279
-Greedy,15000,29,0.23513847633271578
-Greedy,18000,0,0.11061872049694188
-Greedy,18000,1,0.11048140610325285
-Greedy,18000,2,0.11065394379213722
-Greedy,18000,3,0.11158379616921321
-Greedy,18000,4,0.11157870964516314
-Greedy,18000,5,0.11006474486469597
-Greedy,18000,6,0.11050065660051456
-Greedy,18000,7,0.10931543381465725
-Greedy,18000,8,0.10883607012040142
-Greedy,18000,9,0.11176431459637459
-Greedy,18000,10,0.10976390010085313
-Greedy,18000,11,0.11109237071368538
-Greedy,18000,12,0.11148197256728276
-Greedy,18000,13,0.11057031727656141
-Greedy,18000,14,0.11111217204593771
-Greedy,18000,15,0.1103834583025346
-Greedy,18000,16,0.11084119866422582
-Greedy,18000,17,0.11292208783999824
-Greedy,18000,18,0.10999460641144176
-Greedy,18000,19,0.10974921357072198
-Greedy,18000,20,0.11259690725875815
-Greedy,18000,21,0.11103490292749008
-Greedy,18000,22,0.11054951807438129
-Greedy,18000,23,0.11029148500597008
-Greedy,18000,24,0.11233528060480236
-Greedy,18000,25,0.11172566963433095
-Greedy,18000,26,0.11139550431060741
-Greedy,18000,27,0.11152009108624361
-Greedy,18000,28,0.10973915963007563
-Greedy,18000,29,0.11084653077096396

BIN
add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean.pickle


+ 0 - 361
add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean_gaps.csv

@@ -1,361 +0,0 @@
-scheduler,budget,trial_idx,gap
-LNaive,3000,0,-0.11525232625795723
-LNaive,3000,1,-0.1162814635380931
-LNaive,3000,2,-0.11551019510829863
-LNaive,3000,3,-0.11578460032602256
-LNaive,3000,4,-0.1168991513005484
-LNaive,3000,5,-0.11474698109340775
-LNaive,3000,6,-0.11497327736693819
-LNaive,3000,7,-0.11275637400928828
-LNaive,3000,8,-0.11624163715605995
-LNaive,3000,9,-0.11386484827034127
-LNaive,3000,10,-0.11416710728552326
-LNaive,3000,11,-0.11651576085099058
-LNaive,3000,12,-0.11533031247191894
-LNaive,3000,13,-0.11406256413918703
-LNaive,3000,14,-0.11511433453208864
-LNaive,3000,15,-0.11491153507306862
-LNaive,3000,16,-0.11517702896500737
-LNaive,3000,17,-0.11515555434496139
-LNaive,3000,18,-0.11647905931956459
-LNaive,3000,19,-0.11332103610047217
-LNaive,3000,20,-0.11477871981890231
-LNaive,3000,21,-0.11528005898978089
-LNaive,3000,22,-0.11521974853001682
-LNaive,3000,23,-0.11526682919731202
-LNaive,3000,24,-0.11483393188245583
-LNaive,3000,25,-0.11604973245452976
-LNaive,3000,26,-0.11601627167684603
-LNaive,3000,27,-0.11498501355879442
-LNaive,3000,28,-0.11336483399843922
-LNaive,3000,29,-0.11411357081000006
-LNaive,6000,0,-0.08410053607175727
-LNaive,6000,1,-0.08621707049825411
-LNaive,6000,2,-0.08424618675234807
-LNaive,6000,3,-0.08465800379733879
-LNaive,6000,4,-0.08515169687302315
-LNaive,6000,5,-0.08412673466893905
-LNaive,6000,6,-0.08414399169494835
-LNaive,6000,7,-0.08563398717853521
-LNaive,6000,8,-0.08557804895502297
-LNaive,6000,9,-0.08592465738069044
-LNaive,6000,10,-0.08516015630053952
-LNaive,6000,11,-0.0849386698781548
-LNaive,6000,12,-0.08342706071341288
-LNaive,6000,13,-0.08534575371908071
-LNaive,6000,14,-0.0858827255771959
-LNaive,6000,15,-0.08566092505053668
-LNaive,6000,16,-0.08546618601739919
-LNaive,6000,17,-0.08472149067973589
-LNaive,6000,18,-0.08390470326136512
-LNaive,6000,19,-0.08592953965469963
-LNaive,6000,20,-0.08629504718703163
-LNaive,6000,21,-0.08360568438788851
-LNaive,6000,22,-0.08549599662260277
-LNaive,6000,23,-0.08626749446333892
-LNaive,6000,24,-0.08522178337115482
-LNaive,6000,25,-0.08532563980022778
-LNaive,6000,26,-0.08643554785645013
-LNaive,6000,27,-0.08794586362430934
-LNaive,6000,28,-0.08469812807472832
-LNaive,6000,29,-0.08400907489567622
-LNaive,9000,0,-0.06706929999458755
-LNaive,9000,1,-0.06719176019389805
-LNaive,9000,2,-0.06847094382512631
-LNaive,9000,3,-0.06526765674904389
-LNaive,9000,4,-0.0659777734064041
-LNaive,9000,5,-0.06903576955642243
-LNaive,9000,6,-0.06666184163014321
-LNaive,9000,7,-0.06727799114826416
-LNaive,9000,8,-0.06683021782917309
-LNaive,9000,9,-0.06750585414715649
-LNaive,9000,10,-0.06762111958611605
-LNaive,9000,11,-0.06760298362193295
-LNaive,9000,12,-0.06846468246680504
-LNaive,9000,13,-0.06785821138656167
-LNaive,9000,14,-0.06630217831537122
-LNaive,9000,15,-0.06717584001673715
-LNaive,9000,16,-0.0672001407748789
-LNaive,9000,17,-0.07061517636075754
-LNaive,9000,18,-0.06591584415634077
-LNaive,9000,19,-0.06552229395447795
-LNaive,9000,20,-0.06964788144933887
-LNaive,9000,21,-0.0677077831833327
-LNaive,9000,22,-0.06715073505720526
-LNaive,9000,23,-0.07022556113748535
-LNaive,9000,24,-0.06591741786876026
-LNaive,9000,25,-0.06768736428949107
-LNaive,9000,26,-0.06831537963798129
-LNaive,9000,27,-0.0658430998057028
-LNaive,9000,28,-0.06671158206955563
-LNaive,9000,29,-0.0676154516658154
-LNaive,12000,0,-0.058544923367648716
-LNaive,12000,1,-0.056432418818347374
-LNaive,12000,2,-0.05570096088439569
-LNaive,12000,3,-0.05701798371636757
-LNaive,12000,4,-0.05912758199447987
-LNaive,12000,5,-0.057539983657202476
-LNaive,12000,6,-0.055641875494184645
-LNaive,12000,7,-0.05624362366780444
-LNaive,12000,8,-0.056488534177082994
-LNaive,12000,9,-0.05571491498873815
-LNaive,12000,10,-0.056503244937608166
-LNaive,12000,11,-0.055944163268356606
-LNaive,12000,12,-0.05556887071511152
-LNaive,12000,13,-0.055296518386941385
-LNaive,12000,14,-0.057779288815738794
-LNaive,12000,15,-0.05812085229700059
-LNaive,12000,16,-0.056774091783305836
-LNaive,12000,17,-0.057237707740509514
-LNaive,12000,18,-0.0568832978738385
-LNaive,12000,19,-0.05675254283354847
-LNaive,12000,20,-0.05832459125442091
-LNaive,12000,21,-0.05617372161817846
-LNaive,12000,22,-0.05603652121303271
-LNaive,12000,23,-0.057553450050187305
-LNaive,12000,24,-0.057432033544936756
-LNaive,12000,25,-0.05756115356322988
-LNaive,12000,26,-0.05678748001755618
-LNaive,12000,27,-0.057016274124603195
-LNaive,12000,28,-0.05707397496610889
-LNaive,12000,29,-0.056437265566944284
-LNaive,15000,0,-0.0476723763507807
-LNaive,15000,1,-0.04800210017035045
-LNaive,15000,2,-0.048009178567418154
-LNaive,15000,3,-0.04838738248920671
-LNaive,15000,4,-0.047670257985745645
-LNaive,15000,5,-0.048407268872576736
-LNaive,15000,6,-0.04824012166374636
-LNaive,15000,7,-0.047624054752827494
-LNaive,15000,8,-0.04569789758371845
-LNaive,15000,9,-0.0488464106442873
-LNaive,15000,10,-0.046753112029086275
-LNaive,15000,11,-0.049306713651953604
-LNaive,15000,12,-0.047498996255456616
-LNaive,15000,13,-0.04933460449762839
-LNaive,15000,14,-0.048096519895612255
-LNaive,15000,15,-0.04886615828379437
-LNaive,15000,16,-0.04892750482224917
-LNaive,15000,17,-0.04829790907395792
-LNaive,15000,18,-0.047648622313834044
-LNaive,15000,19,-0.04847281255448166
-LNaive,15000,20,-0.04831851947376897
-LNaive,15000,21,-0.04640452463499645
-LNaive,15000,22,-0.04842256674786194
-LNaive,15000,23,-0.04766644205552828
-LNaive,15000,24,-0.04775117722367006
-LNaive,15000,25,-0.04758827844628
-LNaive,15000,26,-0.04793959509230683
-LNaive,15000,27,-0.04779434457972864
-LNaive,15000,28,-0.04802690104292051
-LNaive,15000,29,-0.048070782592939176
-LNaive,18000,0,-0.03285328678983923
-LNaive,18000,1,-0.03418379515034442
-LNaive,18000,2,-0.0339449449691307
-LNaive,18000,3,-0.03342130772811447
-LNaive,18000,4,-0.033580329045689306
-LNaive,18000,5,-0.033827917991081535
-LNaive,18000,6,-0.03405689578836968
-LNaive,18000,7,-0.03410341032338149
-LNaive,18000,8,-0.03409021316365868
-LNaive,18000,9,-0.03337322392319109
-LNaive,18000,10,-0.03282605584345222
-LNaive,18000,11,-0.03420217053309971
-LNaive,18000,12,-0.03430192489396677
-LNaive,18000,13,-0.03350386005781991
-LNaive,18000,14,-0.03374399354719615
-LNaive,18000,15,-0.03413562780217294
-LNaive,18000,16,-0.0338250698581779
-LNaive,18000,17,-0.03394311668420502
-LNaive,18000,18,-0.03384920772272282
-LNaive,18000,19,-0.033400235164445125
-LNaive,18000,20,-0.03472046021581898
-LNaive,18000,21,-0.0331739878321049
-LNaive,18000,22,-0.033271491491977834
-LNaive,18000,23,-0.03336753481857124
-LNaive,18000,24,-0.03292697387312438
-LNaive,18000,25,-0.03387107645718834
-LNaive,18000,26,-0.03468662894912011
-LNaive,18000,27,-0.03365161706293396
-LNaive,18000,28,-0.03435181990677738
-LNaive,18000,29,-0.03464046206926352
-Greedy,3000,0,-0.13972885170603044
-Greedy,3000,1,-0.1403460531063233
-Greedy,3000,2,-0.14222176546769427
-Greedy,3000,3,-0.13730469438911652
-Greedy,3000,4,-0.14195098438465625
-Greedy,3000,5,-0.13841492932030186
-Greedy,3000,6,-0.14176650279367253
-Greedy,3000,7,-0.13798968768180853
-Greedy,3000,8,-0.1391306757469577
-Greedy,3000,9,-0.13860516987828952
-Greedy,3000,10,-0.13894445679573542
-Greedy,3000,11,-0.14059768257365923
-Greedy,3000,12,-0.13791351114537975
-Greedy,3000,13,-0.14147759937534
-Greedy,3000,14,-0.13723457683714946
-Greedy,3000,15,-0.1408789231610349
-Greedy,3000,16,-0.14322930079230134
-Greedy,3000,17,-0.1396819062510981
-Greedy,3000,18,-0.14232023392058712
-Greedy,3000,19,-0.1417571389699006
-Greedy,3000,20,-0.1171285375431651
-Greedy,3000,21,-0.13755193692423373
-Greedy,3000,22,-0.138383294236186
-Greedy,3000,23,-0.13973747257238278
-Greedy,3000,24,-0.14122032623008018
-Greedy,3000,25,-0.14050113825462685
-Greedy,3000,26,-0.13988077998529447
-Greedy,3000,27,-0.14203808259634454
-Greedy,3000,28,-0.1395084023371176
-Greedy,3000,29,-0.14128578783292234
-Greedy,6000,0,-0.11153394393534899
-Greedy,6000,1,-0.11396050550207326
-Greedy,6000,2,-0.11508176645044199
-Greedy,6000,3,-0.1107893906070111
-Greedy,6000,4,-0.14669178976529307
-Greedy,6000,5,-0.11466077434944222
-Greedy,6000,6,-0.11206348126442245
-Greedy,6000,7,-0.11223774697997224
-Greedy,6000,8,-0.11266616921821282
-Greedy,6000,9,-0.1109496251163633
-Greedy,6000,10,-0.11012340565875478
-Greedy,6000,11,-0.11268742915067953
-Greedy,6000,12,-0.11134789611679019
-Greedy,6000,13,-0.11275115668892077
-Greedy,6000,14,-0.11117280856121636
-Greedy,6000,15,-0.11064044914852111
-Greedy,6000,16,-0.1127852138836829
-Greedy,6000,17,-0.11174942800151866
-Greedy,6000,18,-0.1126725408528898
-Greedy,6000,19,-0.11552334392128416
-Greedy,6000,20,-0.11226041011771626
-Greedy,6000,21,-0.11285737662602191
-Greedy,6000,22,-0.11463586698814943
-Greedy,6000,23,-0.11254464799291009
-Greedy,6000,24,-0.11315247293928732
-Greedy,6000,25,-0.11267768487878838
-Greedy,6000,26,-0.11304603353529563
-Greedy,6000,27,-0.11352231627474585
-Greedy,6000,28,-0.11184227865145158
-Greedy,6000,29,-0.1124183101031665
-Greedy,9000,0,-0.056359429280879336
-Greedy,9000,1,-0.12871584118717483
-Greedy,9000,2,-0.12358776506982616
-Greedy,9000,3,-0.12358895541278903
-Greedy,9000,4,-0.12496222761761067
-Greedy,9000,5,-0.11909401550899301
-Greedy,9000,6,-0.12449521763918903
-Greedy,9000,7,-0.11201845002488353
-Greedy,9000,8,-0.13173879286181478
-Greedy,9000,9,-0.0592323169237885
-Greedy,9000,10,-0.12407976786054653
-Greedy,9000,11,-0.1165000556831346
-Greedy,9000,12,-0.12747199941587883
-Greedy,9000,13,-0.12760550300055973
-Greedy,9000,14,-0.1618891772911667
-Greedy,9000,15,-0.12525166064796
-Greedy,9000,16,-0.15828018897796425
-Greedy,9000,17,-0.1175759938093508
-Greedy,9000,18,-0.1283332600822188
-Greedy,9000,19,-0.054764187759823724
-Greedy,9000,20,-0.12607590454515305
-Greedy,9000,21,-0.1272488450100676
-Greedy,9000,22,-0.1614704834120304
-Greedy,9000,23,-0.1281008896565181
-Greedy,9000,24,-0.12423650295136868
-Greedy,9000,25,-0.055532192360656873
-Greedy,9000,26,-0.11806766763155152
-Greedy,9000,27,-0.12446542021876761
-Greedy,9000,28,-0.1232855937380547
-Greedy,9000,29,-0.11117958963077268
-Greedy,12000,0,-0.19308881528450206
-Greedy,12000,1,-0.0599807617167033
-Greedy,12000,2,-0.059750513567686014
-Greedy,12000,3,-0.06187789019571921
-Greedy,12000,4,-0.06350897192254523
-Greedy,12000,5,-0.10505559521947683
-Greedy,12000,6,-0.13707939870241326
-Greedy,12000,7,-0.05748732668709877
-Greedy,12000,8,-0.06064680214773366
-Greedy,12000,9,-0.05899421009277617
-Greedy,12000,10,-0.05726160491970289
-Greedy,12000,11,-0.060498616098134894
-Greedy,12000,12,-0.06011962362693779
-Greedy,12000,13,-0.19087920211304032
-Greedy,12000,14,-0.19311673857312606
-Greedy,12000,15,-0.19447442946484772
-Greedy,12000,16,-0.059453330403918536
-Greedy,12000,17,-0.05832860020131014
-Greedy,12000,18,-0.10331965680082511
-Greedy,12000,19,-0.05870447373951937
-Greedy,12000,20,-0.0640002245040584
-Greedy,12000,21,-0.19327415585791607
-Greedy,12000,22,-0.19199589785955018
-Greedy,12000,23,-0.05935788367481465
-Greedy,12000,24,-0.1377102059785391
-Greedy,12000,25,-0.19664789202727206
-Greedy,12000,26,-0.05961027895029569
-Greedy,12000,27,-0.05745977113669509
-Greedy,12000,28,-0.19290196529912862
-Greedy,12000,29,-0.06263305432690303
-Greedy,15000,0,-0.2312602521716809
-Greedy,15000,1,-0.2349234134006688
-Greedy,15000,2,-0.23021164479799228
-Greedy,15000,3,-0.2367385567651843
-Greedy,15000,4,-0.2353637192938116
-Greedy,15000,5,-0.23712638115319296
-Greedy,15000,6,-0.23304136923404895
-Greedy,15000,7,-0.23624743807182824
-Greedy,15000,8,-0.2369419891809903
-Greedy,15000,9,-0.23629208084019337
-Greedy,15000,10,-0.23071436531264133
-Greedy,15000,11,-0.23268439567971733
-Greedy,15000,12,-0.23383799902794755
-Greedy,15000,13,-0.23126249637732887
-Greedy,15000,14,-0.23671306016973603
-Greedy,15000,15,-0.23476257550352697
-Greedy,15000,16,-0.2375124181608883
-Greedy,15000,17,-0.23515862610237326
-Greedy,15000,18,-0.23410215682623603
-Greedy,15000,19,-0.23512075812304567
-Greedy,15000,20,-0.2353708258885725
-Greedy,15000,21,-0.22886320061158227
-Greedy,15000,22,-0.23521398524219417
-Greedy,15000,23,-0.23474802688206764
-Greedy,15000,24,-0.2325459868447648
-Greedy,15000,25,-0.23047709432490637
-Greedy,15000,26,-0.23258809228289512
-Greedy,15000,27,-0.23360252961171568
-Greedy,15000,28,-0.2317248261342022
-Greedy,15000,29,-0.23513847652516207
-Greedy,18000,0,-0.050689208742866976
-Greedy,18000,1,-0.05610178956226186
-Greedy,18000,2,-0.05529292089111382
-Greedy,18000,3,-0.05391095884112296
-Greedy,18000,4,-0.05527537718604414
-Greedy,18000,5,-0.05291576386440888
-Greedy,18000,6,-0.05589718674837563
-Greedy,18000,7,-0.05703460112363601
-Greedy,18000,8,-0.05288260378610621
-Greedy,18000,9,-0.05093272821328232
-Greedy,18000,10,-0.05263941358712931
-Greedy,18000,11,-0.05127495876849786
-Greedy,18000,12,-0.056588230391489636
-Greedy,18000,13,-0.054617362841273276
-Greedy,18000,14,-0.05491780932389867
-Greedy,18000,15,-0.05488966361316072
-Greedy,18000,16,-0.0546800527835567
-Greedy,18000,17,-0.055645831741658935
-Greedy,18000,18,-0.05289644107860947
-Greedy,18000,19,-0.052083270065799825
-Greedy,18000,20,-0.056565310358409615
-Greedy,18000,21,-0.055960761254003843
-Greedy,18000,22,-0.05416885311036168
-Greedy,18000,23,-0.053975184026726186
-Greedy,18000,24,-0.05460168862819825
-Greedy,18000,25,-0.052125137973311086
-Greedy,18000,26,-0.05207579322379363
-Greedy,18000,27,-0.050152668213635176
-Greedy,18000,28,-0.05450366795772399
-Greedy,18000,29,-0.05424090126270076

+ 0 - 361
add_linkselfie/outputs/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean_widths.csv

@@ -1,361 +0,0 @@
-scheduler,budget,trial_idx,width
-LNaive,3000,0,0.10199818777583403
-LNaive,3000,1,0.10278264569241251
-LNaive,3000,2,0.10176420116603713
-LNaive,3000,3,0.10282694630758364
-LNaive,3000,4,0.10279064148250232
-LNaive,3000,5,0.10277776759496826
-LNaive,3000,6,0.10210270036016507
-LNaive,3000,7,0.1031471849590984
-LNaive,3000,8,0.1024002046888266
-LNaive,3000,9,0.1017882775264906
-LNaive,3000,10,0.10224311092409925
-LNaive,3000,11,0.10205555748670576
-LNaive,3000,12,0.10217474547008669
-LNaive,3000,13,0.10193310815982648
-LNaive,3000,14,0.10229085996873961
-LNaive,3000,15,0.10269266453386443
-LNaive,3000,16,0.10237885319386242
-LNaive,3000,17,0.10252208368379023
-LNaive,3000,18,0.10295973270276544
-LNaive,3000,19,0.10215583541525537
-LNaive,3000,20,0.10128346498349146
-LNaive,3000,21,0.10230628083759459
-LNaive,3000,22,0.10194315194661407
-LNaive,3000,23,0.10274534117897664
-LNaive,3000,24,0.10192977589278862
-LNaive,3000,25,0.10289085423178275
-LNaive,3000,26,0.10325932183493214
-LNaive,3000,27,0.10236657888782259
-LNaive,3000,28,0.10150638089870638
-LNaive,3000,29,0.10241959226891233
-LNaive,6000,0,0.08918083710123621
-LNaive,6000,1,0.08955686212422913
-LNaive,6000,2,0.08960607985358167
-LNaive,6000,3,0.08973690909546599
-LNaive,6000,4,0.08935087171686358
-LNaive,6000,5,0.08957749616282817
-LNaive,6000,6,0.08924178745552298
-LNaive,6000,7,0.08952220312646952
-LNaive,6000,8,0.08920844029501844
-LNaive,6000,9,0.08946447569812414
-LNaive,6000,10,0.08989020379315742
-LNaive,6000,11,0.08944341839871343
-LNaive,6000,12,0.08914405428418444
-LNaive,6000,13,0.08942015103654981
-LNaive,6000,14,0.08964170525582453
-LNaive,6000,15,0.09004057255670155
-LNaive,6000,16,0.0896119196496672
-LNaive,6000,17,0.08953295702067095
-LNaive,6000,18,0.08996394807536297
-LNaive,6000,19,0.09007309521344857
-LNaive,6000,20,0.08996444739523395
-LNaive,6000,21,0.08958695396587217
-LNaive,6000,22,0.08939193399426455
-LNaive,6000,23,0.08947050691219323
-LNaive,6000,24,0.08971435085820345
-LNaive,6000,25,0.08979458668436047
-LNaive,6000,26,0.08912208060928566
-LNaive,6000,27,0.08997823907155682
-LNaive,6000,28,0.08995013708616728
-LNaive,6000,29,0.08973120075181547
-LNaive,9000,0,0.0748498449503822
-LNaive,9000,1,0.07431983976791841
-LNaive,9000,2,0.07452330241287904
-LNaive,9000,3,0.07487894325665641
-LNaive,9000,4,0.0745099879384435
-LNaive,9000,5,0.07487203544733385
-LNaive,9000,6,0.07474966321097698
-LNaive,9000,7,0.07432818348287262
-LNaive,9000,8,0.07473467480260432
-LNaive,9000,9,0.07456870473131982
-LNaive,9000,10,0.07459488903144906
-LNaive,9000,11,0.07457746165859698
-LNaive,9000,12,0.0743916613650359
-LNaive,9000,13,0.07479696502973594
-LNaive,9000,14,0.0746473377994018
-LNaive,9000,15,0.0745402179795089
-LNaive,9000,16,0.07438634745986457
-LNaive,9000,17,0.07426124374172305
-LNaive,9000,18,0.07481220023835129
-LNaive,9000,19,0.07467450176856778
-LNaive,9000,20,0.07455197685729135
-LNaive,9000,21,0.07463738328686553
-LNaive,9000,22,0.07497033327730603
-LNaive,9000,23,0.07424733406595534
-LNaive,9000,24,0.07470751995803981
-LNaive,9000,25,0.0746529054904274
-LNaive,9000,26,0.07412479530908063
-LNaive,9000,27,0.07433852568534505
-LNaive,9000,28,0.07462104119614459
-LNaive,9000,29,0.07480865153812662
-LNaive,12000,0,0.07025416522682996
-LNaive,12000,1,0.06955544881604385
-LNaive,12000,2,0.06977534554998173
-LNaive,12000,3,0.070226454124883
-LNaive,12000,4,0.07030073496843108
-LNaive,12000,5,0.07005157511592193
-LNaive,12000,6,0.0697290498664787
-LNaive,12000,7,0.06995698915128871
-LNaive,12000,8,0.06994842632259597
-LNaive,12000,9,0.06955070518482602
-LNaive,12000,10,0.07026358778194271
-LNaive,12000,11,0.06991838201775706
-LNaive,12000,12,0.06974890926252719
-LNaive,12000,13,0.06957344993393029
-LNaive,12000,14,0.06995965129016346
-LNaive,12000,15,0.07027649687405106
-LNaive,12000,16,0.06981098511378044
-LNaive,12000,17,0.07000925551737362
-LNaive,12000,18,0.06996212519365679
-LNaive,12000,19,0.06956058897815358
-LNaive,12000,20,0.07005591485204939
-LNaive,12000,21,0.07012861333231575
-LNaive,12000,22,0.06978457693169766
-LNaive,12000,23,0.07007191090549318
-LNaive,12000,24,0.06994950548310781
-LNaive,12000,25,0.06998343931838276
-LNaive,12000,26,0.07007996121111482
-LNaive,12000,27,0.06981279476269835
-LNaive,12000,28,0.06943957193402865
-LNaive,12000,29,0.06990941656847005
-LNaive,15000,0,0.06299726980006055
-LNaive,15000,1,0.06292151291291048
-LNaive,15000,2,0.0629426898350361
-LNaive,15000,3,0.06266619557864916
-LNaive,15000,4,0.06277044437623196
-LNaive,15000,5,0.06265088064062056
-LNaive,15000,6,0.06297031317388287
-LNaive,15000,7,0.06270740309316714
-LNaive,15000,8,0.06248483246320994
-LNaive,15000,9,0.06290670532414466
-LNaive,15000,10,0.06264643198675605
-LNaive,15000,11,0.06298203782441043
-LNaive,15000,12,0.0628210183578739
-LNaive,15000,13,0.06286991769858212
-LNaive,15000,14,0.06276808641243337
-LNaive,15000,15,0.0629914700957304
-LNaive,15000,16,0.06312851334007874
-LNaive,15000,17,0.0630118688467592
-LNaive,15000,18,0.06260022107163388
-LNaive,15000,19,0.06272416733162449
-LNaive,15000,20,0.06301929550926062
-LNaive,15000,21,0.06258816726796683
-LNaive,15000,22,0.0629825885326647
-LNaive,15000,23,0.06270470960251433
-LNaive,15000,24,0.06276325990161558
-LNaive,15000,25,0.06287025700437239
-LNaive,15000,26,0.06306170796381434
-LNaive,15000,27,0.06305973308944668
-LNaive,15000,28,0.06258443517139282
-LNaive,15000,29,0.06271271154573439
-LNaive,18000,0,0.06051332565787763
-LNaive,18000,1,0.06081526922350614
-LNaive,18000,2,0.06083427614877527
-LNaive,18000,3,0.06072347254276259
-LNaive,18000,4,0.06079547643139779
-LNaive,18000,5,0.06069543697841014
-LNaive,18000,6,0.060826022868405404
-LNaive,18000,7,0.0606575570108829
-LNaive,18000,8,0.060893597508170506
-LNaive,18000,9,0.060791964542773136
-LNaive,18000,10,0.06062959071712385
-LNaive,18000,11,0.060878919791841235
-LNaive,18000,12,0.06080423716453165
-LNaive,18000,13,0.06070068663150078
-LNaive,18000,14,0.06072085096839086
-LNaive,18000,15,0.06071687091480652
-LNaive,18000,16,0.06077964157989072
-LNaive,18000,17,0.06093118698196528
-LNaive,18000,18,0.06080334375794427
-LNaive,18000,19,0.06069538076796188
-LNaive,18000,20,0.0608482926064798
-LNaive,18000,21,0.06071804211199531
-LNaive,18000,22,0.060642749952236764
-LNaive,18000,23,0.06067182495328175
-LNaive,18000,24,0.060673921841431576
-LNaive,18000,25,0.06070140854360673
-LNaive,18000,26,0.060810705626123375
-LNaive,18000,27,0.06074470099727025
-LNaive,18000,28,0.060716686911185445
-LNaive,18000,29,0.06096131431648233
-Greedy,3000,0,0.1166279071912184
-Greedy,3000,1,0.11647896273266674
-Greedy,3000,2,0.11734728867470112
-Greedy,3000,3,0.11580392182733723
-Greedy,3000,4,0.11849079829782942
-Greedy,3000,5,0.11598169420777656
-Greedy,3000,6,0.1182207592275711
-Greedy,3000,7,0.11662574986144691
-Greedy,3000,8,0.11604164510883952
-Greedy,3000,9,0.11628759813617535
-Greedy,3000,10,0.11736349415795562
-Greedy,3000,11,0.11856500626314614
-Greedy,3000,12,0.11474286544572106
-Greedy,3000,13,0.11733191933326208
-Greedy,3000,14,0.11516186366336438
-Greedy,3000,15,0.11538562651029782
-Greedy,3000,16,0.11770217522768738
-Greedy,3000,17,0.11714889475178432
-Greedy,3000,18,0.11802773925547234
-Greedy,3000,19,0.11781206691034192
-Greedy,3000,20,0.13570626791565946
-Greedy,3000,21,0.11541079140428316
-Greedy,3000,22,0.1169532393898971
-Greedy,3000,23,0.11676514452735005
-Greedy,3000,24,0.11795102449975821
-Greedy,3000,25,0.11675108618881391
-Greedy,3000,26,0.1167857110204145
-Greedy,3000,27,0.11878518698064881
-Greedy,3000,28,0.11516627804092476
-Greedy,3000,29,0.11736201067873446
-Greedy,6000,0,0.09009095428591085
-Greedy,6000,1,0.09055693334851393
-Greedy,6000,2,0.0911639166417421
-Greedy,6000,3,0.08968241058086635
-Greedy,6000,4,0.10123420599875184
-Greedy,6000,5,0.09116329570191377
-Greedy,6000,6,0.09036102563958455
-Greedy,6000,7,0.0900018703713521
-Greedy,6000,8,0.09047981499105932
-Greedy,6000,9,0.0897939605276683
-Greedy,6000,10,0.08899989455566781
-Greedy,6000,11,0.08993071573004859
-Greedy,6000,12,0.08992024968478574
-Greedy,6000,13,0.09070041592682758
-Greedy,6000,14,0.08932328823143405
-Greedy,6000,15,0.08969333755236837
-Greedy,6000,16,0.08969816781322078
-Greedy,6000,17,0.08973353015295497
-Greedy,6000,18,0.09054414670655409
-Greedy,6000,19,0.0911472609259673
-Greedy,6000,20,0.08979557893364065
-Greedy,6000,21,0.09022209395471115
-Greedy,6000,22,0.09069316548501498
-Greedy,6000,23,0.08965934675464644
-Greedy,6000,24,0.09044595675329208
-Greedy,6000,25,0.09034010701559037
-Greedy,6000,26,0.09004474173052952
-Greedy,6000,27,0.09000130161557889
-Greedy,6000,28,0.09016038733316352
-Greedy,6000,29,0.09038769271066671
-Greedy,9000,0,0.062320825849886595
-Greedy,9000,1,0.07591824927107034
-Greedy,9000,2,0.07495774316730386
-Greedy,9000,3,0.07506392785669365
-Greedy,9000,4,0.07464662022720703
-Greedy,9000,5,0.0756074331307028
-Greedy,9000,6,0.07521805849188538
-Greedy,9000,7,0.07577848234586304
-Greedy,9000,8,0.07730249830972187
-Greedy,9000,9,0.06276344363214699
-Greedy,9000,10,0.07434511552144189
-Greedy,9000,11,0.07518425814762449
-Greedy,9000,12,0.07551446247835132
-Greedy,9000,13,0.07598781208742063
-Greedy,9000,14,0.08716478842692461
-Greedy,9000,15,0.07494810946000348
-Greedy,9000,16,0.0871432384276211
-Greedy,9000,17,0.07583489558341831
-Greedy,9000,18,0.07576363698629067
-Greedy,9000,19,0.06232804577291803
-Greedy,9000,20,0.07535510447408089
-Greedy,9000,21,0.07642193651869038
-Greedy,9000,22,0.08697713170967976
-Greedy,9000,23,0.07598518813596582
-Greedy,9000,24,0.07479214510684244
-Greedy,9000,25,0.06259438537761715
-Greedy,9000,26,0.07539343355386081
-Greedy,9000,27,0.07537088224713535
-Greedy,9000,28,0.07403131280055768
-Greedy,9000,29,0.07474192136823436
-Greedy,12000,0,0.09716226700410006
-Greedy,12000,1,0.13789120772359764
-Greedy,12000,2,0.13540030105902687
-Greedy,12000,3,0.13335293778302307
-Greedy,12000,4,0.13554651719533886
-Greedy,12000,5,0.08520109506098521
-Greedy,12000,6,0.07855025185283167
-Greedy,12000,7,0.1333952392564138
-Greedy,12000,8,0.13881203225116856
-Greedy,12000,9,0.13401331124596605
-Greedy,12000,10,0.13727485454724797
-Greedy,12000,11,0.13588048823348
-Greedy,12000,12,0.1352935732007786
-Greedy,12000,13,0.09642572928027948
-Greedy,12000,14,0.09717157476697473
-Greedy,12000,15,0.09762413839754862
-Greedy,12000,16,0.13720050719695923
-Greedy,12000,17,0.13565800619107873
-Greedy,12000,18,0.08462244892143463
-Greedy,12000,19,0.13572937440930954
-Greedy,12000,20,0.13796508500675117
-Greedy,12000,21,0.09722404719523807
-Greedy,12000,22,0.09679796119578277
-Greedy,12000,23,0.135369537981048
-Greedy,12000,24,0.07876052094487362
-Greedy,12000,25,0.09834862591835673
-Greedy,12000,26,0.13721188932326167
-Greedy,12000,27,0.13551525588678373
-Greedy,12000,28,0.09709998367564225
-Greedy,12000,29,0.13594968739455524
-Greedy,15000,0,0.11394814537671875
-Greedy,15000,1,0.11555025952831399
-Greedy,15000,2,0.1139763877357151
-Greedy,15000,3,0.11584848658347613
-Greedy,15000,4,0.115626276260634
-Greedy,15000,5,0.11675884850417262
-Greedy,15000,6,0.11494493278262759
-Greedy,15000,7,0.11606415498283273
-Greedy,15000,8,0.11676112953780031
-Greedy,15000,9,0.11593639742321393
-Greedy,15000,10,0.1142265038013816
-Greedy,15000,11,0.11524553700811165
-Greedy,15000,12,0.11519745802883359
-Greedy,15000,13,0.11405524333216821
-Greedy,15000,14,0.11639700572455906
-Greedy,15000,15,0.11557162424474354
-Greedy,15000,16,0.11639303768978297
-Greedy,15000,17,0.11601886817972613
-Greedy,15000,18,0.11524282162854038
-Greedy,15000,19,0.1157029464040847
-Greedy,15000,20,0.11564035872047033
-Greedy,15000,21,0.11387702746501908
-Greedy,15000,22,0.11563054970917068
-Greedy,15000,23,0.11508382801234436
-Greedy,15000,24,0.11481132493131052
-Greedy,15000,25,0.11419527257179962
-Greedy,15000,26,0.11474677730320544
-Greedy,15000,27,0.11495259528681549
-Greedy,15000,28,0.11456827469496766
-Greedy,15000,29,0.11567476677341888
-Greedy,18000,0,0.08506558862550324
-Greedy,18000,1,0.08592040899836266
-Greedy,18000,2,0.08602657597789017
-Greedy,18000,3,0.08648757283008768
-Greedy,18000,4,0.08632132953183262
-Greedy,18000,5,0.08530113683377527
-Greedy,18000,6,0.0855990695994751
-Greedy,18000,7,0.08549350676479588
-Greedy,18000,8,0.08512322767912106
-Greedy,18000,9,0.08585356006443212
-Greedy,18000,10,0.08535786960420348
-Greedy,18000,11,0.08545855634118926
-Greedy,18000,12,0.08638328351183788
-Greedy,18000,13,0.085329199493265
-Greedy,18000,14,0.08510510464180136
-Greedy,18000,15,0.08606131723542476
-Greedy,18000,16,0.08532142663653068
-Greedy,18000,17,0.08615646349371005
-Greedy,18000,18,0.0855825865656296
-Greedy,18000,19,0.08528238398044914
-Greedy,18000,20,0.08582647081450893
-Greedy,18000,21,0.08622365546838262
-Greedy,18000,22,0.08620812014116434
-Greedy,18000,23,0.08597723194751883
-Greedy,18000,24,0.085502355173531
-Greedy,18000,25,0.08576536461498219
-Greedy,18000,26,0.08696225661454442
-Greedy,18000,27,0.08642667827794244
-Greedy,18000,28,0.08504092455068057
-Greedy,18000,29,0.08532758390925095

BIN
add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_max.pickle


+ 0 - 361
add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_max_widths.csv

@@ -1,361 +0,0 @@
-scheduler,budget,trial_idx,width
-LNaive,3000,0,0.1730818382602286
-LNaive,3000,1,0.1730818382602286
-LNaive,3000,2,0.1730818382602286
-LNaive,3000,3,0.1730818382602286
-LNaive,3000,4,0.1730818382602286
-LNaive,3000,5,0.1730818382602286
-LNaive,3000,6,0.1730818382602286
-LNaive,3000,7,0.1730818382602286
-LNaive,3000,8,0.1730818382602286
-LNaive,3000,9,0.1730818382602286
-LNaive,3000,10,0.1730818382602286
-LNaive,3000,11,0.1730818382602286
-LNaive,3000,12,0.1730818382602286
-LNaive,3000,13,0.1730818382602286
-LNaive,3000,14,0.1730818382602286
-LNaive,3000,15,0.1730818382602286
-LNaive,3000,16,0.1730818382602286
-LNaive,3000,17,0.1730818382602286
-LNaive,3000,18,0.1730818382602286
-LNaive,3000,19,0.1730818382602286
-LNaive,3000,20,0.1730818382602286
-LNaive,3000,21,0.1730818382602286
-LNaive,3000,22,0.1730818382602286
-LNaive,3000,23,0.1730818382602286
-LNaive,3000,24,0.1730818382602286
-LNaive,3000,25,0.1730818382602286
-LNaive,3000,26,0.1730818382602286
-LNaive,3000,27,0.1730818382602286
-LNaive,3000,28,0.1730818382602286
-LNaive,3000,29,0.1730818382602286
-LNaive,6000,0,0.12238734153404085
-LNaive,6000,1,0.12238734153404085
-LNaive,6000,2,0.12238734153404085
-LNaive,6000,3,0.12238734153404085
-LNaive,6000,4,0.12238734153404085
-LNaive,6000,5,0.12238734153404085
-LNaive,6000,6,0.12238734153404085
-LNaive,6000,7,0.12238734153404085
-LNaive,6000,8,0.12238734153404085
-LNaive,6000,9,0.12238734153404085
-LNaive,6000,10,0.12238734153404085
-LNaive,6000,11,0.12238734153404085
-LNaive,6000,12,0.12238734153404085
-LNaive,6000,13,0.12238734153404085
-LNaive,6000,14,0.12238734153404085
-LNaive,6000,15,0.12238734153404085
-LNaive,6000,16,0.12238734153404085
-LNaive,6000,17,0.12238734153404085
-LNaive,6000,18,0.12238734153404085
-LNaive,6000,19,0.12238734153404085
-LNaive,6000,20,0.12238734153404085
-LNaive,6000,21,0.12238734153404085
-LNaive,6000,22,0.12238734153404085
-LNaive,6000,23,0.12238734153404085
-LNaive,6000,24,0.12238734153404085
-LNaive,6000,25,0.12238734153404085
-LNaive,6000,26,0.12238734153404085
-LNaive,6000,27,0.12238734153404085
-LNaive,6000,28,0.12238734153404085
-LNaive,6000,29,0.12238734153404085
-LNaive,9000,0,0.0999288459113783
-LNaive,9000,1,0.0999288459113783
-LNaive,9000,2,0.0999288459113783
-LNaive,9000,3,0.0999288459113783
-LNaive,9000,4,0.0999288459113783
-LNaive,9000,5,0.0999288459113783
-LNaive,9000,6,0.0999288459113783
-LNaive,9000,7,0.0999288459113783
-LNaive,9000,8,0.0999288459113783
-LNaive,9000,9,0.0999288459113783
-LNaive,9000,10,0.0999288459113783
-LNaive,9000,11,0.0999288459113783
-LNaive,9000,12,0.0999288459113783
-LNaive,9000,13,0.0999288459113783
-LNaive,9000,14,0.0999288459113783
-LNaive,9000,15,0.0999288459113783
-LNaive,9000,16,0.0999288459113783
-LNaive,9000,17,0.0999288459113783
-LNaive,9000,18,0.0999288459113783
-LNaive,9000,19,0.0999288459113783
-LNaive,9000,20,0.0999288459113783
-LNaive,9000,21,0.0999288459113783
-LNaive,9000,22,0.0999288459113783
-LNaive,9000,23,0.0999288459113783
-LNaive,9000,24,0.0999288459113783
-LNaive,9000,25,0.0999288459113783
-LNaive,9000,26,0.0999288459113783
-LNaive,9000,27,0.0999288459113783
-LNaive,9000,28,0.0999288459113783
-LNaive,9000,29,0.0999288459113783
-LNaive,12000,0,0.0865409191301143
-LNaive,12000,1,0.0865409191301143
-LNaive,12000,2,0.0865409191301143
-LNaive,12000,3,0.0865409191301143
-LNaive,12000,4,0.0865409191301143
-LNaive,12000,5,0.0865409191301143
-LNaive,12000,6,0.0865409191301143
-LNaive,12000,7,0.0865409191301143
-LNaive,12000,8,0.0865409191301143
-LNaive,12000,9,0.0865409191301143
-LNaive,12000,10,0.0865409191301143
-LNaive,12000,11,0.0865409191301143
-LNaive,12000,12,0.0865409191301143
-LNaive,12000,13,0.0865409191301143
-LNaive,12000,14,0.0865409191301143
-LNaive,12000,15,0.0865409191301143
-LNaive,12000,16,0.0865409191301143
-LNaive,12000,17,0.0865409191301143
-LNaive,12000,18,0.0865409191301143
-LNaive,12000,19,0.0865409191301143
-LNaive,12000,20,0.0865409191301143
-LNaive,12000,21,0.0865409191301143
-LNaive,12000,22,0.0865409191301143
-LNaive,12000,23,0.0865409191301143
-LNaive,12000,24,0.0865409191301143
-LNaive,12000,25,0.0865409191301143
-LNaive,12000,26,0.0865409191301143
-LNaive,12000,27,0.0865409191301143
-LNaive,12000,28,0.0865409191301143
-LNaive,12000,29,0.0865409191301143
-LNaive,15000,0,0.07740455120409906
-LNaive,15000,1,0.07740455120409906
-LNaive,15000,2,0.07740455120409906
-LNaive,15000,3,0.07740455120409906
-LNaive,15000,4,0.07740455120409906
-LNaive,15000,5,0.07740455120409906
-LNaive,15000,6,0.07740455120409906
-LNaive,15000,7,0.07740455120409906
-LNaive,15000,8,0.07740455120409906
-LNaive,15000,9,0.07740455120409906
-LNaive,15000,10,0.07740455120409906
-LNaive,15000,11,0.07740455120409906
-LNaive,15000,12,0.07740455120409906
-LNaive,15000,13,0.07740455120409906
-LNaive,15000,14,0.07740455120409906
-LNaive,15000,15,0.07740455120409906
-LNaive,15000,16,0.07740455120409906
-LNaive,15000,17,0.07740455120409906
-LNaive,15000,18,0.07740455120409906
-LNaive,15000,19,0.07740455120409906
-LNaive,15000,20,0.07740455120409906
-LNaive,15000,21,0.07740455120409906
-LNaive,15000,22,0.07740455120409906
-LNaive,15000,23,0.07740455120409906
-LNaive,15000,24,0.07740455120409906
-LNaive,15000,25,0.07740455120409906
-LNaive,15000,26,0.07740455120409906
-LNaive,15000,27,0.07740455120409906
-LNaive,15000,28,0.07740455120409906
-LNaive,15000,29,0.07740455120409906
-LNaive,18000,0,0.07066036458008118
-LNaive,18000,1,0.07066036458008118
-LNaive,18000,2,0.07066036458008118
-LNaive,18000,3,0.07066036458008118
-LNaive,18000,4,0.07066036458008118
-LNaive,18000,5,0.07066036458008118
-LNaive,18000,6,0.07066036458008118
-LNaive,18000,7,0.07066036458008118
-LNaive,18000,8,0.07066036458008118
-LNaive,18000,9,0.07066036458008118
-LNaive,18000,10,0.07066036458008118
-LNaive,18000,11,0.07066036458008118
-LNaive,18000,12,0.07066036458008118
-LNaive,18000,13,0.07066036458008118
-LNaive,18000,14,0.07066036458008118
-LNaive,18000,15,0.07066036458008118
-LNaive,18000,16,0.07066036458008118
-LNaive,18000,17,0.07066036458008118
-LNaive,18000,18,0.07066036458008118
-LNaive,18000,19,0.07066036458008118
-LNaive,18000,20,0.07066036458008118
-LNaive,18000,21,0.07066036458008118
-LNaive,18000,22,0.07066036458008118
-LNaive,18000,23,0.07066036458008118
-LNaive,18000,24,0.07066036458008118
-LNaive,18000,25,0.07066036458008118
-LNaive,18000,26,0.07066036458008118
-LNaive,18000,27,0.07066036458008118
-LNaive,18000,28,0.07066036458008118
-LNaive,18000,29,0.07066036458008118
-Greedy,3000,0,0.34723916799351384
-Greedy,3000,1,0.3459652120721066
-Greedy,3000,2,0.3622313030705129
-Greedy,3000,3,0.3629449445309261
-Greedy,3000,4,0.34820279169581014
-Greedy,3000,5,0.35439536398837435
-Greedy,3000,6,0.3628327255275745
-Greedy,3000,7,0.3496325266609508
-Greedy,3000,8,0.36205731363488325
-Greedy,3000,9,0.3574963572766112
-Greedy,3000,10,0.3475526982986106
-Greedy,3000,11,0.35199775783773823
-Greedy,3000,12,0.36423615354166117
-Greedy,3000,13,0.35219732741511
-Greedy,3000,14,0.3483256434338864
-Greedy,3000,15,0.3505957901380361
-Greedy,3000,16,0.3653564875913793
-Greedy,3000,17,0.37583869519405533
-Greedy,3000,18,0.3508138028558039
-Greedy,3000,19,0.36691586525275643
-Greedy,3000,20,0.20687236484481342
-Greedy,3000,21,0.35634394253378465
-Greedy,3000,22,0.3474380210343706
-Greedy,3000,23,0.36061193853338525
-Greedy,3000,24,0.368099312289935
-Greedy,3000,25,0.3621017132828408
-Greedy,3000,26,0.3659211228106558
-Greedy,3000,27,0.36933241301581377
-Greedy,3000,28,0.35257054333864235
-Greedy,3000,29,0.35125944862837133
-Greedy,6000,0,0.27366641525559876
-Greedy,6000,1,0.27366641525559876
-Greedy,6000,2,0.27366641525559876
-Greedy,6000,3,0.27366641525559876
-Greedy,6000,4,0.37267299218416317
-Greedy,6000,5,0.27366641525559876
-Greedy,6000,6,0.27366641525559876
-Greedy,6000,7,0.27366641525559876
-Greedy,6000,8,0.27366641525559876
-Greedy,6000,9,0.27366641525559876
-Greedy,6000,10,0.27366641525559876
-Greedy,6000,11,0.27366641525559876
-Greedy,6000,12,0.27366641525559876
-Greedy,6000,13,0.27366641525559876
-Greedy,6000,14,0.27366641525559876
-Greedy,6000,15,0.27366641525559876
-Greedy,6000,16,0.27366641525559876
-Greedy,6000,17,0.27366641525559876
-Greedy,6000,18,0.27366641525559876
-Greedy,6000,19,0.27366641525559876
-Greedy,6000,20,0.27366641525559876
-Greedy,6000,21,0.27366641525559876
-Greedy,6000,22,0.27366641525559876
-Greedy,6000,23,0.27366641525559876
-Greedy,6000,24,0.27366641525559876
-Greedy,6000,25,0.27366641525559876
-Greedy,6000,26,0.27366641525559876
-Greedy,6000,27,0.27366641525559876
-Greedy,6000,28,0.27366641525559876
-Greedy,6000,29,0.27366641525559876
-Greedy,9000,0,0.20687236484481342
-Greedy,9000,1,0.27366641525559876
-Greedy,9000,2,0.27366641525559876
-Greedy,9000,3,0.27366641525559876
-Greedy,9000,4,0.27366641525559876
-Greedy,9000,5,0.20687236484481342
-Greedy,9000,6,0.20687236484481342
-Greedy,9000,7,0.20687236484481342
-Greedy,9000,8,0.20687236484481342
-Greedy,9000,9,0.20687236484481342
-Greedy,9000,10,0.27366641525559876
-Greedy,9000,11,0.20687236484481342
-Greedy,9000,12,0.27366641525559876
-Greedy,9000,13,0.20687236484481342
-Greedy,9000,14,0.27366641525559876
-Greedy,9000,15,0.27366641525559876
-Greedy,9000,16,0.27366641525559876
-Greedy,9000,17,0.20687236484481342
-Greedy,9000,18,0.27366641525559876
-Greedy,9000,19,0.20687236484481342
-Greedy,9000,20,0.20687236484481342
-Greedy,9000,21,0.20687236484481342
-Greedy,9000,22,0.27366641525559876
-Greedy,9000,23,0.27366641525559876
-Greedy,9000,24,0.20687236484481342
-Greedy,9000,25,0.20687236484481342
-Greedy,9000,26,0.20687236484481342
-Greedy,9000,27,0.27366641525559876
-Greedy,9000,28,0.27366641525559876
-Greedy,9000,29,0.20687236484481342
-Greedy,12000,0,0.38204198687702706
-Greedy,12000,1,0.315513083874587
-Greedy,12000,2,0.3080403640785656
-Greedy,12000,3,0.3018982736555398
-Greedy,12000,4,0.3084790118536205
-Greedy,12000,5,0.20687236484481342
-Greedy,12000,6,0.27366641525559876
-Greedy,12000,7,0.302025178078035
-Greedy,12000,8,0.31827555736442403
-Greedy,12000,9,0.3038793943427103
-Greedy,12000,10,0.3136640241066311
-Greedy,12000,11,0.30948092523810367
-Greedy,12000,12,0.3077201806493213
-Greedy,12000,13,0.3472779591214208
-Greedy,12000,14,0.3822367988564699
-Greedy,12000,15,0.3590698975519264
-Greedy,12000,16,0.3134409815731516
-Greedy,12000,17,0.30881347903408707
-Greedy,12000,18,0.20687236484481342
-Greedy,12000,19,0.3090275835443683
-Greedy,12000,20,0.31573471604898984
-Greedy,12000,21,0.38167819510687573
-Greedy,12000,22,0.3808562840650509
-Greedy,12000,23,0.30794807480734665
-Greedy,12000,24,0.27366641525559876
-Greedy,12000,25,0.3870227560204951
-Greedy,12000,26,0.31347512848879844
-Greedy,12000,27,0.3083852282126054
-Greedy,12000,28,0.3870227560204951
-Greedy,12000,29,0.30968852291522975
-Greedy,15000,0,0.32471897682560424
-Greedy,15000,1,0.30332447673103036
-Greedy,15000,2,0.31998602646313024
-Greedy,15000,3,0.30950840137077806
-Greedy,15000,4,0.3102178671110267
-Greedy,15000,5,0.32265188974967596
-Greedy,15000,6,0.32019717557200456
-Greedy,15000,7,0.3122922240930024
-Greedy,15000,8,0.2783673425605997
-Greedy,15000,9,0.3178204546416248
-Greedy,15000,10,0.3050536682422891
-Greedy,15000,11,0.3255048888110519
-Greedy,15000,12,0.3132067133651055
-Greedy,15000,13,0.32184006182182645
-Greedy,15000,14,0.31607043272836033
-Greedy,15000,15,0.3174185926912929
-Greedy,15000,16,0.3106470045708809
-Greedy,15000,17,0.30911174259816865
-Greedy,15000,18,0.30699798789830357
-Greedy,15000,19,0.3063621889479904
-Greedy,15000,20,0.3068965201171874
-Greedy,15000,21,0.31724432840168393
-Greedy,15000,22,0.3221096787288211
-Greedy,15000,23,0.3200331208160905
-Greedy,15000,24,0.3119177205942725
-Greedy,15000,25,0.3214941160443803
-Greedy,15000,26,0.3115135341753803
-Greedy,15000,27,0.3076298016735426
-Greedy,15000,28,0.32220863445933934
-Greedy,15000,29,0.31537766061757067
-Greedy,18000,0,0.18244427683706577
-Greedy,18000,1,0.18244427683706577
-Greedy,18000,2,0.20687236484481342
-Greedy,18000,3,0.18244427683706577
-Greedy,18000,4,0.20687236484481342
-Greedy,18000,5,0.18244427683706577
-Greedy,18000,6,0.18244427683706577
-Greedy,18000,7,0.20687236484481342
-Greedy,18000,8,0.20687236484481342
-Greedy,18000,9,0.20687236484481342
-Greedy,18000,10,0.18244427683706577
-Greedy,18000,11,0.18244427683706577
-Greedy,18000,12,0.20687236484481342
-Greedy,18000,13,0.18244427683706577
-Greedy,18000,14,0.12394647880423704
-Greedy,18000,15,0.20687236484481342
-Greedy,18000,16,0.18244427683706577
-Greedy,18000,17,0.20687236484481342
-Greedy,18000,18,0.20687236484481342
-Greedy,18000,19,0.18244427683706577
-Greedy,18000,20,0.20687236484481342
-Greedy,18000,21,0.20687236484481342
-Greedy,18000,22,0.20687236484481342
-Greedy,18000,23,0.20687236484481342
-Greedy,18000,24,0.18244427683706577
-Greedy,18000,25,0.18244427683706577
-Greedy,18000,26,0.20687236484481342
-Greedy,18000,27,0.20687236484481342
-Greedy,18000,28,0.20687236484481342
-Greedy,18000,29,0.18244427683706577

BIN
add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_mean.pickle


+ 0 - 361
add_linkselfie/outputs/plot_ciwidth_vs_budget_Depolar_mean_widths.csv

@@ -1,361 +0,0 @@
-scheduler,budget,trial_idx,width
-LNaive,3000,0,0.15485068457168077
-LNaive,3000,1,0.15502019412818455
-LNaive,3000,2,0.15487491278629187
-LNaive,3000,3,0.15497742389379213
-LNaive,3000,4,0.1550047941271319
-LNaive,3000,5,0.15541597663452278
-LNaive,3000,6,0.15506326790417826
-LNaive,3000,7,0.15528126214469415
-LNaive,3000,8,0.15493208008776987
-LNaive,3000,9,0.15500727956958532
-LNaive,3000,10,0.15507317106986107
-LNaive,3000,11,0.15494858871477893
-LNaive,3000,12,0.15498466248465992
-LNaive,3000,13,0.15513231257975962
-LNaive,3000,14,0.15469363579937295
-LNaive,3000,15,0.15510829800305226
-LNaive,3000,16,0.15528056035590082
-LNaive,3000,17,0.1550845558257326
-LNaive,3000,18,0.15505631520123628
-LNaive,3000,19,0.15504658748506103
-LNaive,3000,20,0.15461513428677348
-LNaive,3000,21,0.15470957673435168
-LNaive,3000,22,0.1548081565272979
-LNaive,3000,23,0.15508693544006627
-LNaive,3000,24,0.15517802848428788
-LNaive,3000,25,0.15540436028150126
-LNaive,3000,26,0.1554615888889591
-LNaive,3000,27,0.1549754480782745
-LNaive,3000,28,0.15456401977238893
-LNaive,3000,29,0.15515088958297296
-LNaive,6000,0,0.11509287734993452
-LNaive,6000,1,0.11524225798815114
-LNaive,6000,2,0.11534236836778525
-LNaive,6000,3,0.11524429010760089
-LNaive,6000,4,0.11515551522046343
-LNaive,6000,5,0.11513330662996361
-LNaive,6000,6,0.1152207664352098
-LNaive,6000,7,0.11524524113070406
-LNaive,6000,8,0.11509776080458939
-LNaive,6000,9,0.11520546291120078
-LNaive,6000,10,0.11530238339768976
-LNaive,6000,11,0.1151841249550993
-LNaive,6000,12,0.1151350728076638
-LNaive,6000,13,0.1151973298731296
-LNaive,6000,14,0.11524441581390563
-LNaive,6000,15,0.11521623472618979
-LNaive,6000,16,0.11527372252143156
-LNaive,6000,17,0.11512721115284102
-LNaive,6000,18,0.11527377126876358
-LNaive,6000,19,0.11523118529857096
-LNaive,6000,20,0.11534854284095335
-LNaive,6000,21,0.11530516250691598
-LNaive,6000,22,0.11520058487665688
-LNaive,6000,23,0.11511617535255292
-LNaive,6000,24,0.11523307333813643
-LNaive,6000,25,0.11526994632260038
-LNaive,6000,26,0.11515872638364094
-LNaive,6000,27,0.11542811182655294
-LNaive,6000,28,0.11521651678833225
-LNaive,6000,29,0.11529870847274076
-LNaive,9000,0,0.09458056953857129
-LNaive,9000,1,0.09441669520072822
-LNaive,9000,2,0.09448882782821395
-LNaive,9000,3,0.09453211188519892
-LNaive,9000,4,0.09445309323815829
-LNaive,9000,5,0.09455193203992289
-LNaive,9000,6,0.09450726990683413
-LNaive,9000,7,0.09446202182584094
-LNaive,9000,8,0.09451727136031221
-LNaive,9000,9,0.09451079247214826
-LNaive,9000,10,0.09448048494857884
-LNaive,9000,11,0.09442933793811953
-LNaive,9000,12,0.0944641364023581
-LNaive,9000,13,0.09452429938155044
-LNaive,9000,14,0.09458126442166291
-LNaive,9000,15,0.09447583786702654
-LNaive,9000,16,0.09447089603992079
-LNaive,9000,17,0.09445669253790999
-LNaive,9000,18,0.09442648138268481
-LNaive,9000,19,0.09447823500350579
-LNaive,9000,20,0.09452902836045057
-LNaive,9000,21,0.09453207905700017
-LNaive,9000,22,0.09444815548233639
-LNaive,9000,23,0.09448775014379386
-LNaive,9000,24,0.09455800674998184
-LNaive,9000,25,0.09450298010931543
-LNaive,9000,26,0.09435366625637244
-LNaive,9000,27,0.09435616281108024
-LNaive,9000,28,0.09450597466336115
-LNaive,9000,29,0.09458025102705635
-LNaive,12000,0,0.08328356834980749
-LNaive,12000,1,0.08314382507289414
-LNaive,12000,2,0.08318780441569405
-LNaive,12000,3,0.08327802612058147
-LNaive,12000,4,0.08329288229099795
-LNaive,12000,5,0.08324305033040007
-LNaive,12000,6,0.08317854528845307
-LNaive,12000,7,0.08322413313534179
-LNaive,12000,8,0.08322242056793867
-LNaive,12000,9,0.08314287635192015
-LNaive,12000,10,0.08328545286185525
-LNaive,12000,11,0.08321641170070697
-LNaive,12000,12,0.08318251714769218
-LNaive,12000,13,0.08314742528799722
-LNaive,12000,14,0.08322466555101823
-LNaive,12000,15,0.08328803468428995
-LNaive,12000,16,0.08319493232850655
-LNaive,12000,17,0.08323458641574306
-LNaive,12000,18,0.0832251603368434
-LNaive,12000,19,0.08314485309555014
-LNaive,12000,20,0.08324391827258117
-LNaive,12000,21,0.08325845796605487
-LNaive,12000,22,0.08318965068291967
-LNaive,12000,23,0.08324711749346954
-LNaive,12000,24,0.08322263640529161
-LNaive,12000,25,0.08322942316832295
-LNaive,12000,26,0.0832487275422831
-LNaive,12000,27,0.08319529426219617
-LNaive,12000,28,0.08312064969270049
-LNaive,12000,29,0.08321461862403154
-LNaive,15000,0,0.07452309492984957
-LNaive,15000,1,0.07450794354605145
-LNaive,15000,2,0.07451217892752515
-LNaive,15000,3,0.07445688008537829
-LNaive,15000,4,0.07447772983160218
-LNaive,15000,5,0.07445381709482107
-LNaive,15000,6,0.0745177035954168
-LNaive,15000,7,0.07446512157605983
-LNaive,15000,8,0.07442060745470468
-LNaive,15000,9,0.07450498202212642
-LNaive,15000,10,0.07445292735570595
-LNaive,15000,11,0.07452004853325404
-LNaive,15000,12,0.07448784462690171
-LNaive,15000,13,0.07449762450339685
-LNaive,15000,14,0.07447725824868508
-LNaive,15000,15,0.074521934995716
-LNaive,15000,16,0.07454934362473921
-LNaive,15000,17,0.07452601473380868
-LNaive,15000,18,0.07444368516904357
-LNaive,15000,19,0.07446847442989539
-LNaive,15000,20,0.0745275000625166
-LNaive,15000,21,0.074441274420766
-LNaive,15000,22,0.0745201586634468
-LNaive,15000,23,0.07446458288587235
-LNaive,15000,24,0.07447629295219331
-LNaive,15000,25,0.07449769236614961
-LNaive,15000,26,0.07453598254903641
-LNaive,15000,27,0.07453558757404527
-LNaive,15000,28,0.07444052799470205
-LNaive,15000,29,0.07446618326576403
-LNaive,18000,0,0.0686309567918666
-LNaive,18000,1,0.06869134551446304
-LNaive,18000,2,0.06869514689274236
-LNaive,18000,3,0.06867298617462873
-LNaive,18000,4,0.06868738695101602
-LNaive,18000,5,0.06866737906145826
-LNaive,18000,6,0.06869349623594655
-LNaive,18000,7,0.06865980306132
-LNaive,18000,8,0.06870701116394246
-LNaive,18000,9,0.06868668457337218
-LNaive,18000,10,0.06865420980827514
-LNaive,18000,11,0.06870407562529098
-LNaive,18000,12,0.06868913909640394
-LNaive,18000,13,0.06866842898642656
-LNaive,18000,14,0.0686724618643888
-LNaive,18000,15,0.06867166584896918
-LNaive,18000,16,0.06868421998037583
-LNaive,18000,17,0.06871452906081102
-LNaive,18000,18,0.06868896041117385
-LNaive,18000,19,0.0686673678167125
-LNaive,18000,20,0.06869795018596755
-LNaive,18000,21,0.0686719000855651
-LNaive,18000,22,0.06865684165916344
-LNaive,18000,23,0.06866265665776439
-LNaive,18000,24,0.068663076031885
-LNaive,18000,25,0.06866857337206204
-LNaive,18000,26,0.06869043278752963
-LNaive,18000,27,0.06867723186819541
-LNaive,18000,28,0.06867162904842265
-LNaive,18000,29,0.06872055452616331
-Greedy,3000,0,0.19067726530880932
-Greedy,3000,1,0.18870294172595503
-Greedy,3000,2,0.19058580752782378
-Greedy,3000,3,0.19039476674982148
-Greedy,3000,4,0.19123747144051464
-Greedy,3000,5,0.19116505352545912
-Greedy,3000,6,0.19260612990777956
-Greedy,3000,7,0.19088835920323882
-Greedy,3000,8,0.1916479546138514
-Greedy,3000,9,0.19050637758804434
-Greedy,3000,10,0.1909889563102278
-Greedy,3000,11,0.19109779431552135
-Greedy,3000,12,0.1918413832203051
-Greedy,3000,13,0.1901077079099537
-Greedy,3000,14,0.18982676544709978
-Greedy,3000,15,0.18991083548310955
-Greedy,3000,16,0.19213284373449563
-Greedy,3000,17,0.1917736752509453
-Greedy,3000,18,0.19057727288443865
-Greedy,3000,19,0.19237001463603853
-Greedy,3000,20,0.14148792020122428
-Greedy,3000,21,0.19222473894251055
-Greedy,3000,22,0.19116567170110266
-Greedy,3000,23,0.191337653699954
-Greedy,3000,24,0.19173528910993892
-Greedy,3000,25,0.1923986349443716
-Greedy,3000,26,0.19274208968196985
-Greedy,3000,27,0.19186351694644277
-Greedy,3000,28,0.18993647758475743
-Greedy,3000,29,0.1913564231164088
-Greedy,6000,0,0.14657783480744505
-Greedy,6000,1,0.14669854795607942
-Greedy,6000,2,0.14662940421693296
-Greedy,6000,3,0.1465709338233267
-Greedy,6000,4,0.16172586981404108
-Greedy,6000,5,0.1466302480656654
-Greedy,6000,6,0.14657792544379433
-Greedy,6000,7,0.1465109562665693
-Greedy,6000,8,0.1464784255958976
-Greedy,6000,9,0.14655721094911384
-Greedy,6000,10,0.14612740741229863
-Greedy,6000,11,0.1464239656449017
-Greedy,6000,12,0.14649070112431353
-Greedy,6000,13,0.14677758774672128
-Greedy,6000,14,0.14608023974165973
-Greedy,6000,15,0.1465986427481613
-Greedy,6000,16,0.14656533398416444
-Greedy,6000,17,0.1464008312024358
-Greedy,6000,18,0.14654300594372452
-Greedy,6000,19,0.14669927137355626
-Greedy,6000,20,0.14642310423464225
-Greedy,6000,21,0.14637573037372317
-Greedy,6000,22,0.14677829078271898
-Greedy,6000,23,0.14624495926128833
-Greedy,6000,24,0.14651491056528743
-Greedy,6000,25,0.14675568044186538
-Greedy,6000,26,0.14659783029137347
-Greedy,6000,27,0.14648513656152204
-Greedy,6000,28,0.14659026950331142
-Greedy,6000,29,0.14654791435966094
-Greedy,9000,0,0.1315552782865432
-Greedy,9000,1,0.14368450394347168
-Greedy,9000,2,0.14382038790110224
-Greedy,9000,3,0.14384162485001348
-Greedy,9000,4,0.14374478265142449
-Greedy,9000,5,0.14117850993600164
-Greedy,9000,6,0.139880363487722
-Greedy,9000,7,0.14121271976517444
-Greedy,9000,8,0.13971436671788054
-Greedy,9000,9,0.13740406189893256
-Greedy,9000,10,0.14369786238325727
-Greedy,9000,11,0.1410938749412158
-Greedy,9000,12,0.1436474301431309
-Greedy,9000,13,0.13969064407605777
-Greedy,9000,14,0.14505060727008512
-Greedy,9000,15,0.14370226552909765
-Greedy,9000,16,0.14550451443225204
-Greedy,9000,17,0.14122400242917704
-Greedy,9000,18,0.14363975926176442
-Greedy,9000,19,0.13155672228209464
-Greedy,9000,20,0.13944951007735126
-Greedy,9000,21,0.13977856404615915
-Greedy,9000,22,0.1493412431501384
-Greedy,9000,23,0.14402587687524152
-Greedy,9000,24,0.13937094863868868
-Greedy,9000,25,0.13160999019514366
-Greedy,9000,26,0.14101620093343145
-Greedy,9000,27,0.14390301573205097
-Greedy,9000,28,0.14343983718406683
-Greedy,9000,29,0.14088589849879585
-Greedy,12000,0,0.167503099611028
-Greedy,12000,1,0.12479420346339219
-Greedy,12000,2,0.12411486527901715
-Greedy,12000,3,0.12355649348157342
-Greedy,12000,4,0.12415474236142302
-Greedy,12000,5,0.13005785181842336
-Greedy,12000,6,0.15057594238021163
-Greedy,12000,7,0.12356803020781006
-Greedy,12000,8,0.1250453373808907
-Greedy,12000,9,0.12373659530164273
-Greedy,12000,10,0.12462610711016052
-Greedy,12000,11,0.12424582538633179
-Greedy,12000,12,0.12408575769548046
-Greedy,12000,13,0.1651044081542615
-Greedy,12000,14,0.16717525052398757
-Greedy,12000,15,0.1656169602338579
-Greedy,12000,16,0.12460583053610125
-Greedy,12000,17,0.12418514848213379
-Greedy,12000,18,0.1300159153646925
-Greedy,12000,19,0.12420461255115972
-Greedy,12000,20,0.12481435182441566
-Greedy,12000,21,0.16773044747485036
-Greedy,12000,22,0.16698969592968194
-Greedy,12000,23,0.12410647534375557
-Greedy,12000,24,0.1501646145474665
-Greedy,12000,25,0.16770731739285374
-Greedy,12000,26,0.12460893478600271
-Greedy,12000,27,0.12414621657111378
-Greedy,12000,28,0.16807792124509985
-Greedy,12000,29,0.12426469792651354
-Greedy,15000,0,0.16883128381117013
-Greedy,15000,1,0.15723303097230726
-Greedy,15000,2,0.15839145159254472
-Greedy,15000,3,0.15704839825887643
-Greedy,15000,4,0.15739661961957077
-Greedy,15000,5,0.1589227743609069
-Greedy,15000,6,0.15827920096219655
-Greedy,15000,7,0.15756205734552386
-Greedy,15000,8,0.14628994084882072
-Greedy,15000,9,0.15738562376889803
-Greedy,15000,10,0.1565992942761182
-Greedy,15000,11,0.1580962414589838
-Greedy,15000,12,0.15759658744697105
-Greedy,15000,13,0.15744365447019343
-Greedy,15000,14,0.15835904338891613
-Greedy,15000,15,0.15695885278520758
-Greedy,15000,16,0.157292671608181
-Greedy,15000,17,0.15775613823565937
-Greedy,15000,18,0.15750371866224924
-Greedy,15000,19,0.15680089648467246
-Greedy,15000,20,0.15698542805068857
-Greedy,15000,21,0.15717650639884592
-Greedy,15000,22,0.1581195529321024
-Greedy,15000,23,0.15747354989387854
-Greedy,15000,24,0.15733235463930825
-Greedy,15000,25,0.15794488935740622
-Greedy,15000,26,0.15644603588405928
-Greedy,15000,27,0.15709097977189676
-Greedy,15000,28,0.16815724264798593
-Greedy,15000,29,0.1574031096278783
-Greedy,18000,0,0.11237448050703322
-Greedy,18000,1,0.11254544456667756
-Greedy,18000,2,0.11841912722342356
-Greedy,18000,3,0.11265887736229506
-Greedy,18000,4,0.11847807793394984
-Greedy,18000,5,0.11242159016029175
-Greedy,18000,6,0.112481176705376
-Greedy,18000,7,0.1138799456790494
-Greedy,18000,8,0.11823845757030878
-Greedy,18000,9,0.11838452405302262
-Greedy,18000,10,0.11243293669883231
-Greedy,18000,11,0.11245307406698982
-Greedy,18000,12,0.11849046873443839
-Greedy,18000,13,0.11242720268576259
-Greedy,18000,14,0.10794981600876609
-Greedy,18000,15,0.11842607548321388
-Greedy,18000,16,0.11242564812002524
-Greedy,18000,17,0.11844510473356981
-Greedy,18000,18,0.11833032934573078
-Greedy,18000,19,0.11241783958052014
-Greedy,18000,20,0.11837910619934776
-Greedy,18000,21,0.11845854311189874
-Greedy,18000,22,0.11845543605475022
-Greedy,18000,23,0.11840925840681066
-Greedy,18000,24,0.11246183383289732
-Greedy,18000,25,0.11251443570517798
-Greedy,18000,26,0.11860626333930227
-Greedy,18000,27,0.11849914768799248
-Greedy,18000,28,0.11822199692096488
-Greedy,18000,29,0.11242687957351091

BIN
add_linkselfie/outputs/plot_minwidthsum_perpair_vs_budget_Depolar.pickle


BIN
add_linkselfie/outputs/plot_minwidthsum_perpair_weighted_vs_budget_Depolar.pickle


BIN
add_linkselfie/outputs/plot_widthsum_alllinks_vs_budget_Depolar.pickle


BIN
add_linkselfie/outputs/plot_widthsum_alllinks_weighted_vs_budget_Depolar.pickle


+ 0 - 74
add_linkselfie/piclecsv.py

@@ -1,74 +0,0 @@
-import os, pickle, csv, sys, re
-
-# ここに変換したいファイルを列挙(相対パスOK)
-files = [
-    "outputs/plot_minwidthsum_perpair_vs_budget_Depolar.pickle",
-    "outputs/plot_minwidthsum_perpair_weighted_vs_budget_Depolar.pickle",
-    "outputs/plot_widthsum_alllinks_vs_budget_Depolar.pickle",
-    "outputs/plot_widthsum_alllinks_weighted_vs_budget_Depolar.pickle",
-]
-
-# 指標候補(見つかった順に採用)
-PREFERRED_KEYS = [
-    "minwidthsum_weighted",
-    "minwidthsum",
-    "widthsum_alllinks_weighted",
-    "widthsum_alllinks",
-    "accuracy",  # 念のため
-    "value",     # 念のため
-    "metric"     # 念のため
-]
-
-def pick_metric_key(results):
-    """results は {budget: {...}}。どのキーでCSV化するか自動推定"""
-    for b, r in results.items():
-        if isinstance(r, dict) and r:
-            keys = set(r.keys())
-            # ノイズになりがちなキーを除外
-            keys -= {"per_pair_details", "details", "meta"}
-            # 優先候補から探す
-            for k in PREFERRED_KEYS:
-                if k in keys:
-                    return k
-            # それでも見つからなければ、数値っぽい最初のキー
-            for k in keys:
-                v = r.get(k)
-                if isinstance(v, (int, float)) or (v is not None and not isinstance(v, (dict, list, tuple, set))):
-                    return k
-    return None
-
-for path in files:
-    if not os.path.exists(path):
-        print(f"[WARN] not found: {path}")
-        continue
-
-    with open(path, "rb") as f:
-        try:
-            obj = pickle.load(f)
-        except Exception as e:
-            print(f"[ERROR] pickle.load failed for {path}: {e}")
-            continue
-
-    budgets = obj.get("budget_list", [])
-    results = obj.get("results", {})
-
-    if not budgets or not isinstance(results, dict) or not results:
-        print(f"[WARN] {path}: budgets or results is empty(サイズが小さい594Bケースかも)")
-        continue
-
-    metric_key = pick_metric_key(results)
-    if not metric_key:
-        print(f"[WARN] {path}: 指標キーが見つからないためスキップ(resultsの中身を要確認)")
-        continue
-
-    out_csv = os.path.splitext(path)[0] + ".csv"  # 同名で .csv を outputs/ に出力
-    os.makedirs(os.path.dirname(out_csv), exist_ok=True)
-
-    with open(out_csv, "w", newline="") as fcsv:
-        w = csv.writer(fcsv)
-        w.writerow(["budget", metric_key])
-        for b in budgets:
-            v = results.get(b, {}).get(metric_key)
-            w.writerow([b, v])
-
-    print(f"[OK] {out_csv} (列: budget,{metric_key})")

+ 0 - 29
add_linkselfie/piclecsv.py~

@@ -1,29 +0,0 @@
-cd ~/quantum/add_linkselfie/outputs
-python - <<'PY'
-import pickle, pprint
-path = "plot_minwidthsum_perpair_weighted_vs_budget_Depolar.pickle"
-with open(path,"rb") as f: obj = pickle.load(f)
-
-budgets = obj.get("budget_list", [])
-results = obj.get("results", {})
-print("budgets:", budgets[:10], "... (len =", len(budgets),")")
-print("results keys sample:", list(results.keys())[:5])
-
-for b in (min(budgets), budgets[len(budgets)//2], max(budgets)):
-    r = results.get(b)
-    if r is None: continue
-    print(f"\n=== Budget {b} ===")
-    for k in ("minwidthsum_weighted","widthsum_alllinks","per_pair_details"):
-        if k in r:
-            v = r[k]
-            print(f"{k}: type={type(v)}",
-                  ("len="+str(len(v)) if hasattr(v, '__len__') else ""))
-    det = r.get("per_pair_details", [])
-    if det:
-        d0 = det[0]
-        print("per_pair_details[0] keys:", list(d0.keys()))
-        if "min_width_per_pair" in d0:
-            print("min_width_per_pair:", d0["min_width_per_pair"])
-        if "alloc_by_path" in d0:
-            print("alloc_by_path (sample):", list(d0["alloc_by_path"].items())[:3])
-PY

BIN
add_linkselfie/plot_accuracy_vs_budget_Depolar.pdf


BIN
add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean.pdf


BIN
add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_max_bestby-mean_gap.pdf


BIN
add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean.pdf


BIN
add_linkselfie/plot_ciwidth_best_vs_budget_Depolar_mean_bestby-mean_gap.pdf


BIN
add_linkselfie/plot_ciwidth_vs_budget_Depolar_max.pdf


BIN
add_linkselfie/plot_ciwidth_vs_budget_Depolar_mean.pdf


BIN
add_linkselfie/plot_minwidthsum_perpair_vs_budget_Depolar.pdf


BIN
add_linkselfie/plot_minwidthsum_perpair_weighted_vs_budget_Depolar.pdf


BIN
add_linkselfie/plot_value_vs_budget_Depolar.pdf


BIN
add_linkselfie/plot_value_vs_budget_target_Depolar.pdf


BIN
add_linkselfie/plot_value_vs_used_Depolar.pdf


BIN
add_linkselfie/plot_weighted_errorrate_vs_budget_Depolar.pdf


BIN
add_linkselfie/plot_widthsum_alllinks_vs_budget_Depolar.pdf


BIN
add_linkselfie/plot_widthsum_alllinks_weighted_vs_budget_Depolar.pdf


BIN
add_linkselfie/schedulers/__pycache__/__init__.cpython-38.pyc


BIN
add_linkselfie/schedulers/__pycache__/greedy_scheduler.cpython-38.pyc


BIN
add_linkselfie/schedulers/__pycache__/lonline_nb.cpython-38.pyc


BIN
add_linkselfie/schedulers/__pycache__/pac_greedy_scheduler.cpython-38.pyc


BIN
add_linkselfie/schedulers/__pycache__/pac_naive_scheduler.cpython-38.pyc


BIN
add_linkselfie/schedulers/__pycache__/pac_wrapper.cpython-38.pyc


+ 0 - 95
add_linkselfie/schedulers/greedy_scheduler.py

@@ -1,95 +0,0 @@
-# schedulers/greedy_scheduler.py
-from .lonline_nb import lonline_network_benchmarking
-def greedy_budget_scheduler(
-    node_path_list,      # 例: [2, 2, 2] … 各ペアのパス本数
-    importance_list,     # 例: [0.3, 0.5, 0.7] … 長さは node_path_list と同じ
-    bounces,             # 例: [1,2,3,4](重複なし)
-    C_total,             # 総予算
-    network_generator,   # callable: (path_num, pair_idx) -> network
-    C_initial_per_pair=40,  # 各ペアの初期プローブ予算
-    return_details=False,
-):
-    num_pairs = len(node_path_list)
-    assert num_pairs == len(importance_list), "length mismatch: node_path_list vs importance_list"
-    if num_pairs == 0:
-        return ([], 0, []) if return_details else ([], 0)
-
-    assert len(bounces) == len(set(bounces)), "bounces must be unique"
-    assert all(isinstance(w, int) and w > 0 for w in bounces), "bounces must be positive ints"
-
-    # --- Step 1: 各ペアを軽くプローブ(初期推定) ---
-    initial_est_fids = [0.0] * num_pairs
-    initial_costs    = [0]   * num_pairs
-    per_pair_results = [(False, 0, None)] * num_pairs
-    per_pair_details = [{"alloc_by_path": {}, "est_fid_by_path": {}} for _ in range(num_pairs)]
-    consumed_total   = 0
-
-    for pair_idx, path_num in enumerate(node_path_list):
-        if consumed_total >= C_total or path_num <= 0:
-            continue
-        C_probe = min(int(C_initial_per_pair), max(int(C_total) - int(consumed_total), 0))
-        if C_probe <= 0:
-            break
-
-        network = network_generator(path_num, pair_idx)
-        path_list = list(range(1, path_num + 1))
-
-        if return_details:
-            correctness, cost, best_path_fid, alloc0, est0 = lonline_network_benchmarking(
-                network, path_list, list(bounces), int(C_probe), return_details=True
-            )
-            # 詳細をマージ(配分は加算・推定は後勝ち)
-            for l, b in alloc0.items():
-                per_pair_details[pair_idx]["alloc_by_path"][int(l)] = \
-                    per_pair_details[pair_idx]["alloc_by_path"].get(int(l), 0) + int(b)
-            per_pair_details[pair_idx]["est_fid_by_path"].update({int(k): float(v) for k, v in est0.items()})
-        else:
-            correctness, cost, best_path_fid = lonline_network_benchmarking(
-                network, path_list, list(bounces), int(C_probe)
-            )
-
-        consumed_total += int(cost)
-        initial_costs[pair_idx]    = int(cost)
-        initial_est_fids[pair_idx] = float(best_path_fid) if best_path_fid is not None else 0.0
-        per_pair_results[pair_idx] = (bool(correctness), int(cost), best_path_fid)
-
-    remaining = max(int(C_total) - int(consumed_total), 0)
-
-    # --- Step 2: importance * estimated_fidelity で優先度付け ---
-    scores = [(idx, importance_list[idx] * initial_est_fids[idx]) for idx in range(num_pairs)]
-    scores.sort(key=lambda x: x[1], reverse=True)
-
-    # --- Step 3: 残余予算を Greedy に配分(上位にまとめて) ---
-    for pair_idx, _score in scores:
-        if remaining <= 0:
-            break
-        path_num = node_path_list[pair_idx]
-        if path_num <= 0:
-            continue
-
-        network = network_generator(path_num, pair_idx)
-        path_list = list(range(1, path_num + 1))
-
-        if return_details:
-            correctness, cost, best_path_fid, alloc1, est1 = lonline_network_benchmarking(
-                network, path_list, list(bounces), int(remaining), return_details=True
-            )
-            for l, b in alloc1.items():
-                per_pair_details[pair_idx]["alloc_by_path"][int(l)] = \
-                    per_pair_details[pair_idx]["alloc_by_path"].get(int(l), 0) + int(b)
-            per_pair_details[pair_idx]["est_fid_by_path"].update({int(k): float(v) for k, v in est1.items()})
-        else:
-            correctness, cost, best_path_fid = lonline_network_benchmarking(
-                network, path_list, list(bounces), int(remaining)
-            )
-
-        per_pair_results[pair_idx] = (
-            bool(correctness),
-            int(initial_costs[pair_idx] + int(cost)),
-            best_path_fid,
-        )
-        remaining      -= int(cost)
-        consumed_total += int(cost)
-
-    return (per_pair_results, int(consumed_total), per_pair_details) if return_details \
-           else (per_pair_results, int(consumed_total))

+ 0 - 113
add_linkselfie/schedulers/lonline_nb.py

@@ -1,113 +0,0 @@
-# lonline_nb.py
-import math
-
-def lonline_network_benchmarking(network, path_list, bounces, C_budget, return_details=False):
-    """
-    L-Online 風の逐次削除型 NB。
-
-    返り値(常に一貫):
-      return_details=False:
-        (correctness: bool, cost: int, best_path_fidelity: float|None)
-      return_details=True:
-        (correctness: bool, cost: int, best_path_fidelity: float|None,
-         alloc_by_path: dict[int,int], est_fid_by_path: dict[int,float])
-
-    想定 I/F:
-      network.benchmark_path(path, bounces, sample_times) -> (p, used_cost)
-      忠実度変換: fidelity = p + (1 - p)/2
-    """
-    candidate_set = list(path_list)
-    # 既存コード由来のパラメータ(必要に応じて合わせてください)
-    s = 0
-    C = 0.01
-    delta = 0.1
-
-    # 集計器
-    cost = 0
-    estimated_fidelities = {}
-
-    # 詳細返却用の器(return_details に関わらず初期化:どの分岐でも形を揃える)
-    alloc_by_path = {int(p): 0 for p in path_list}
-    est_fid_by_path = {}
-
-    if not candidate_set or C_budget <= 0:
-        # 何も測れないケースでも形は揃えて返す
-        if return_details:
-            return False, int(cost), None, alloc_by_path, est_fid_by_path
-        return False, int(cost), None
-
-    # 1 経路を 1 サンプル測るコストの近似(ここでは hop 重みの和)
-    cost_per_sample_unit = sum(bounces) if sum(bounces) > 0 else 1
-
-    # ---- メインループ ----
-    while cost < C_budget and len(candidate_set) > 1:
-        s += 1
-        # ラウンド s のサンプル回数(既存式)
-        Ns = math.ceil(C * (2 ** (2 * s)) * math.log2(max((2 ** s) * len(candidate_set) / delta, 2)))
-        if Ns < 4:
-            Ns = 4
-
-        # このラウンドで 1 経路に必要なコスト目安
-        cost_needed_for_one_path = Ns * cost_per_sample_unit
-
-        # 2 ラウンド目以降で 1 経路すら回せないなら終了
-        if cost + cost_needed_for_one_path > C_budget and s > 1:
-            break
-
-        # hop ごとに同じ Ns を配る(network 側の想定 I/F に合わせる)
-        sample_times = {h: int(Ns) for h in bounces}
-
-        # ラウンド内の観測
-        p_s = {}
-        measured_paths = []
-
-        for path in list(candidate_set):
-            if cost + cost_needed_for_one_path > C_budget:
-                continue  # 予算が入らない経路はこのラウンドでは測らない
-
-            # 実測
-            p, used = network.benchmark_path(path, bounces, sample_times)
-            cost += int(used)
-
-            # 忠実度推定を更新(既存式)
-            fidelity = p + (1 - p) / 2.0
-            estimated_fidelities[path] = fidelity
-            p_s[path] = p
-            measured_paths.append(path)
-
-            # 詳細集計
-            alloc_by_path[int(path)] = alloc_by_path.get(int(path), 0) + int(used)
-            est_fid_by_path[int(path)] = float(fidelity)
-
-        # このラウンドで 1 本も測れなかったら終了
-        if not p_s:
-            break
-
-        # 連続削除(幅 2^{-s})
-        p_max = max(p_s.values())
-        new_candidate_set = []
-        for path in measured_paths:
-            if p_s[path] + 2 ** (-s) > p_max - 2 ** (-s):
-                new_candidate_set.append(path)
-
-        # 全消し回避:空になったら据え置き
-        candidate_set = new_candidate_set or candidate_set
-
-    # 1 本も推定できなかった場合
-    if not estimated_fidelities:
-        if return_details:
-            return False, int(cost), None, alloc_by_path, est_fid_by_path
-        return False, int(cost), None
-
-    # 最良推定パスと正解判定
-    best_path = max(estimated_fidelities, key=estimated_fidelities.get)
-    best_path_fidelity = estimated_fidelities[best_path]
-    correctness = (best_path == getattr(network, "best_path", None))
-
-    if return_details:
-        return bool(correctness), int(cost), best_path_fidelity, alloc_by_path, est_fid_by_path
-    return bool(correctness), int(cost), best_path_fidelity
-
-
-# 互換用エイリアス(古い呼び名を使っているコード向け)
-lonline_network_benchmarking_with_budget = lonline_network_benchmarking

+ 0 - 35
add_linkselfie/schedulers/memo.txt

@@ -1,35 +0,0 @@
-schedulerの入出力
-
-入力
-def naive_budget_scheduler(
-    node_path_list,      # 例: [2, 2, 2]  … 各ペアのリンク(パス)本数
-    importance_list,     # 例: [0.3, 0.5, 0.7] … 各ペアの重み(長さは node_path_list と同じ)
-    bounces,             # 例: [1,2,3,4](重複なし)… 1サンプルの重み(sum(bounces) が単価)
-    C_total,             # 総予算(整数推奨。超過しないよう切り捨て配分)
-    network_generator,   # callable: (path_num, pair_idx) -> network
-):
-
-
-
-置き場所:scheduler を叩く関数は evaluation.py。main はそれを呼ぶだけ(linkselfie流)。
-network_generator:evaluation の中で noise_model を閉じ込める factory を定義(既存パターンと一致)。
-
-
-出力
-per_pair_results: 入力の順(= node_path_list の順)で並べたタプルのリスト
-各要素は (correctness, cost, best_path_fidelity)
-
-total_cost: すべてのペアの cost 合計(予算チェックに便利)
-
-
-
-main -> evalation -> schedular -> algorithms
-という呼び出しになっている
-
-
-
-lnaive = 隣接ノードに配る測定予算が等しい。ノード間で等しくリンクに測
-定資源を配る。
-
-
-

+ 0 - 11
add_linkselfie/schedulers/memo.txt~

@@ -1,11 +0,0 @@
-
-
-
-
-neighbor_info_dict,
-    network_generator,
-    fidelity_generator,
-    bounces,
-    C_total,
-    C_initial_per_pair=0 # インターフェース互換性のため
-):

+ 0 - 210
add_linkselfie/simulation.py

@@ -1,210 +0,0 @@
-# simulation.py
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-from dataclasses import dataclass
-from typing import Dict, Any, List
-import csv, os, math, random
-
-# ===== 既存ネットワークAPIに合わせたアダプタ =====
-class Adapter:
-    """
-    あなたの network.py / nb_protocol.py を変更せずに使うための薄いラッパ。
-    - QuantumNetwork(path_num, fidelity_list, noise_model) を自前で構築
-    - 単一ペア 'Alice-Bob' に path_id=1..path_num を割当
-    - スケジューラが期待する nb_protocol 互換API(sample_path / true_fidelity)を Shim で提供
-    """
-    def __init__(self, noise_model: str, path_num: int, fidelity_list: List[float], seed: int | None = None):
-        if seed is not None:
-            random.seed(seed)
-        import network as qnet
-        # QuantumNetwork を直接構築(network.py を変更しない)
-        self.net = qnet.QuantumNetwork(path_num=path_num, fidelity_list=fidelity_list, noise_model=noise_model)
-        self.pairs = ["Alice-Bob"]
-        self.paths_map = {"Alice-Bob": list(range(1, path_num + 1))}
-        # nb_protocol 互換 Shim
-        self.nbp = _NBPShim(self.net)
-
-    # ---- ヘルパ ----
-    def true_fidelity(self, path_id: Any) -> float:
-        return self.nbp.true_fidelity(self.net, path_id)
-
-    def list_pairs(self) -> List[Any]:
-        return list(self.pairs)
-
-    def list_paths_of(self, pair_id: Any) -> List[Any]:
-        return list(self.paths_map.get(pair_id, []))
-
-    # ---- スケジューラ呼び出し ----
-    def run_scheduler(self, scheduler_name: str, budget_target: int,
-                      importance: Dict[Any, float]) -> Dict[str, Any]:
-        """
-        スケジューラに共通IFで実行要求する。
-        返り値の想定(辞書):
-          {
-            'used_cost_total': int,
-            'per_pair_details': [
-               {
-                 'pair_id': pair_id,
-                 'alloc_by_path': {path_id: sample_count, ...},
-                 'est_fid_by_path': {path_id: mean_estimate, ...},
-                 'best_pred_path': path_id,
-               }, ...
-            ]
-          }
-        """
-        if scheduler_name == "greedy":
-            from schedulers.greedy_scheduler import run as greedy_run
-            return greedy_run(self.net, self.pairs, self.paths_map, budget_target, importance, self.nbp)
-        elif scheduler_name == "naive":
-            from schedulers.lnaive_scheduler import run as naive_run
-            return naive_run(self.net, self.pairs, self.paths_map, budget_target, importance, self.nbp)
-        elif scheduler_name == "online_nb":
-            from schedulers.lonline_nb import run as onb_run
-            return onb_run(self.net, self.pairs, self.paths_map, budget_target, importance, self.nbp)
-        else:
-            raise ValueError(f"unknown scheduler: {scheduler_name}")
-
-# ===== 便利関数 =====
-def hoeffding_radius(n: int, delta: float = 0.05) -> float:
-    if n <= 0:
-        return 1.0
-    return math.sqrt(0.5 * math.log(2.0 / delta) / n)
-
-def clamp01(x: float) -> float:
-    return 0.0 if x < 0.0 else (1.0 if x > 1.0 else x)
-
-# ===== CSV I/O =====
-CSV_HEADER = [
-    "run_id", "noise", "scheduler", "budget_target",
-    "used_cost_total",
-    "pair_id", "path_id",
-    "importance",               # I_d
-    "samples",                  # B_{d,l}
-    "est_mean", "lb", "ub", "width",
-    "is_best_true", "is_best_pred"
-]
-
-def open_csv(path: str):
-    os.makedirs(os.path.dirname(path), exist_ok=True)
-    exists = os.path.exists(path)
-    f = open(path, "a", newline="")
-    w = csv.writer(f)
-    if not exists:
-        w.writerow(CSV_HEADER)
-    return f, w
-
-# ===== メインシミュレーション =====
-@dataclass
-class ExperimentConfig:
-    noise_model: str
-    budgets: List[int]
-    schedulers: List[str]            # ["greedy", "naive", "online_nb", ...]
-    repeats: int
-    importance_mode: str = "both"    # "both" / "weighted_only" / "unweighted_only"
-    delta_ci: float = 0.05           # 95%CI相当
-    out_csv: str = "outputs/raw_simulation_data.csv"
-    seed: int | None = None
-    # QuantumNetwork 構築用
-    path_num: int = 5
-    fidelity_list: List[float] | None = None
-
-def _importance_for_pairs(pairs: List[Any], mode: str) -> Dict[str, Dict[Any, float]]:
-    res: Dict[str, Dict[Any, float]] = {}
-    if mode in ("both", "unweighted_only"):
-        res["unweighted"] = {p: 1.0 for p in pairs}
-    if mode in ("both", "weighted_only"):
-        # 重要度は例として一様乱数(必要なら差替え)
-        res["weighted"] = {p: 0.5 + random.random() for p in pairs}
-    return res
-
-def run_and_append_csv(cfg: ExperimentConfig) -> str:
-    fid = cfg.fidelity_list or _default_fidelities(cfg.path_num)
-    adp = Adapter(cfg.noise_model, cfg.path_num, fid, seed=cfg.seed)
-    pairs = adp.list_pairs()
-    importance_sets = _importance_for_pairs(pairs, cfg.importance_mode)
-
-    f, w = open_csv(cfg.out_csv)
-    try:
-        run_id = 0
-        for _ in range(cfg.repeats):
-            run_id += 1
-            for budget in cfg.budgets:
-                for sched in cfg.schedulers:
-                    for imp_tag, I in importance_sets.items():
-                        # スケジューラ実行
-                        result = adp.run_scheduler(sched, budget, I)
-
-                        used_cost_total = int(result.get("used_cost_total", budget))
-                        per_pair_details: List[Dict[str, Any]] = result.get("per_pair_details", [])
-
-                        # 真の最良パス(正答率判定用)
-                        true_best_by_pair = {}
-                        for pair in pairs:
-                            paths = adp.list_paths_of(pair)
-                            best = None
-                            bestv = -1.0
-                            for pid in paths:
-                                tf = adp.true_fidelity(pid)
-                                if tf > bestv:
-                                    bestv, best = tf, pid
-                            true_best_by_pair[pair] = best
-
-                        # CSV行を形成
-                        for det in per_pair_details:
-                            pair_id = det["pair_id"]
-                            alloc = det.get("alloc_by_path", {}) or {}
-                            est   = det.get("est_fid_by_path", {}) or {}
-                            pred  = det.get("best_pred_path")
-
-                            for path_id, samples in alloc.items():
-                                m = float(est.get(path_id, 0.5))
-                                r = hoeffding_radius(int(samples), cfg.delta_ci)
-                                lb = clamp01(m - r)
-                                ub = clamp01(m + r)
-                                width = ub - lb
-
-                                is_true_best = (true_best_by_pair.get(pair_id) == path_id)
-                                is_best_pred = (pred == path_id)
-
-                                w.writerow([
-                                    f"{run_id}-{imp_tag}",
-                                    cfg.noise_model,
-                                    sched,
-                                    budget,
-                                    used_cost_total,
-                                    pair_id,
-                                    path_id,
-                                    I.get(pair_id, 1.0),
-                                    int(samples),
-                                    m, lb, ub, width,
-                                    int(is_true_best), int(is_best_pred),
-                                ])
-    finally:
-        f.close()
-    return cfg.out_csv
-
-# ===== nb_protocol 互換 Shim =====
-class _NBPShim:
-    """
-    スケジューラが期待する nb_protocol 風のAPIを提供:
-      - sample_path(net, path_id, n): QuantumNetwork.benchmark_path を呼ぶ
-      - true_fidelity(net, path_id): 量子チャネルの ground truth を返す
-    """
-    def __init__(self, net):
-        self.net = net
-
-    def sample_path(self, net, path_id: int, n: int) -> float:
-        # 1-bounce を n 回の測定にマップ(nb_protocol.NBProtocolAlice の設計に整合)
-        p, _cost = self.net.benchmark_path(path_id, bounces=[1], sample_times={1: int(n)})
-        return float(p)
-
-    def true_fidelity(self, net, path_id: int) -> float:
-        return float(self.net.quantum_channels[path_id - 1].fidelity)
-
-# ===== デフォルト忠実度の簡易生成(必要なら差替え) =====
-def _default_fidelities(path_num: int) -> List[float]:
-    alpha, beta, var = 0.93, 0.85, 0.02
-    res = [max(0.8, min(0.999, random.gauss(beta, var))) for _ in range(path_num)]
-    best_idx = random.randrange(path_num)
-    res[best_idx] = max(0.85, min(0.999, random.gauss(alpha, var)))
-    return res

+ 0 - 124
add_linkselfie/simulation.py~

@@ -1,124 +0,0 @@
-
-# simulation.py
-# Produces ONE CSV of raw data with a "scheduler" and "used" (actual spent cost) columns.
-
-from dataclasses import dataclass
-from typing import List, Dict, Tuple
-import math
-import csv
-import random
-
-from fidelity import generate_fidelity_list_random, generate_importance_list_random
-
-@dataclass
-class SimConfig:
-    n_pairs: int = 3
-    links_per_pair: int = 5
-    budgets: List[int] = None
-    trials: int = 10
-    seed: int = 42
-    init_samples_per_link: int = 4
-    delta: float = 0.1
-    cost_per_sample: int = 1
-    schedulers: List[str] = None  # names for series
-
-    def __post_init__(self):
-        if self.budgets is None:
-            self.budgets = [500, 1000, 2000, 5000]
-        if self.schedulers is None:
-            self.schedulers = ["GreedySimple"]
-
-def _radius(n: int, delta: float) -> float:
-    if n <= 0:
-        return float("inf")
-    return math.sqrt(0.5 * math.log(2.0 / max(1e-12, delta)) / n)
-
-def _argmax(d: Dict[int, float]) -> int:
-    best_k = None
-    best_v = -1e9
-    for k, v in d.items():
-        if v > best_v or (v == best_v and (best_k is None or k < best_k)):
-            best_k, best_v = k, v
-    return best_k if best_k is not None else -1
-
-def _run_scheduler_greedy_simple(n_pairs, links_per_pair, budgets_sorted, delta, init_samples_per_link, cost_per_sample, true_fids_per_pair, importances, writer, trial, scheduler_name):
-    # Per-budget deterministic re-run for snapshots
-    rng_state0 = random.getstate()
-    for b in budgets_sorted:
-        random.setstate(rng_state0)
-        est: Dict[Tuple[int,int], float] = {}
-        ns: Dict[Tuple[int,int], int] = {}
-        for p in range(n_pairs):
-            for l in range(links_per_pair):
-                est[(p,l)] = 0.0
-                ns[(p,l)] = 0
-        used = 0
-        # phase-1: uniform
-        stop = False
-        for p in range(n_pairs):
-            for l in range(links_per_pair):
-                for _ in range(init_samples_per_link):
-                    x = 1 if random.random() < true_fids_per_pair[p][l] else 0
-                    ns[(p,l)] += 1
-                    est[(p,l)] += (x - est[(p,l)]) / ns[(p,l)]
-                    used += cost_per_sample
-                    if used >= b:
-                        stop = True
-                        break
-                if stop: break
-            if stop: break
-        # phase-2: greedy
-        while used < b:
-            for p in range(n_pairs):
-                cur = {l: est[(p,l)] for l in range(links_per_pair)}
-                best_l = max(cur.keys(), key=lambda kk: cur[kk])
-                x = 1 if random.random() < true_fids_per_pair[p][best_l] else 0
-                ns[(p,best_l)] += 1
-                est[(p,best_l)] += (x - est[(p,best_l)]) / ns[(p,best_l)]
-                used += cost_per_sample
-                if used >= b:
-                    break
-        # emit rows (same 'used' for all rows in this (trial,budget,scheduler))
-        for p in range(n_pairs):
-            tb = max(range(links_per_pair), key=lambda l: true_fids_per_pair[p][l])
-            cur_map = {l: est[(p,l)] for l in range(links_per_pair)}
-            ca = max(range(links_per_pair), key=lambda l: cur_map[l])
-            for l in range(links_per_pair):
-                k = (p,l)
-                r = _radius(ns[k], delta)
-                lb = max(0.0, est[k] - r if ns[k] > 0 else 0.0)
-                ub = min(1.0, est[k] + r if ns[k] > 0 else 1.0)
-                writer.writerow([
-                    scheduler_name, trial, b, used, p, l, importances[p], true_fids_per_pair[p][l],
-                    est[k], ns[k], lb, ub,
-                    1 if l == tb else 0,
-                    1 if l == ca else 0,
-                ])
-    random.setstate(rng_state0)
-
-def run_simulation(csv_path: str, cfg: SimConfig) -> None:
-    random.seed(cfg.seed)
-    budgets_sorted = sorted(set(cfg.budgets))
-
-    with open(csv_path, "w", newline="") as f:
-        w = csv.writer(f)
-        w.writerow([
-            "scheduler","trial","budget_target","used","pair_id","link_id","importance","true_fid",
-            "est_mean","n_samples","lb","ub","is_true_best","is_pair_current_argmax"
-        ])
-
-        for trial in range(cfg.trials):
-            # Ground truth per pair
-            true_fids_per_pair: Dict[int, Dict[int, float]] = {}
-            importances: Dict[int, float] = {}
-            for p in range(cfg.n_pairs):
-                true_list = generate_fidelity_list_random(cfg.links_per_pair)
-                true_fids_per_pair[p] = {i: true_list[i] for i in range(cfg.links_per_pair)}
-                importances[p] = generate_importance_list_random(1)[0]
-
-            for sched_name in cfg.schedulers:
-                _run_scheduler_greedy_simple(
-                    cfg.n_pairs, cfg.links_per_pair, budgets_sorted, cfg.delta,
-                    cfg.init_samples_per_link, cfg.cost_per_sample,
-                    true_fids_per_pair, importances, w, trial, sched_name
-                )

+ 0 - 0
add_linkselfie/LICENSE → new_add_linkselfie/LICENSE


+ 0 - 0
add_linkselfie/README.md → new_add_linkselfie/README.md


BIN
new_add_linkselfie/__pycache__/evaluation.cpython-38.pyc


BIN
new_add_linkselfie/__pycache__/evaluationgap.cpython-38.pyc


BIN
new_add_linkselfie/__pycache__/evaluationpair.cpython-38.pyc


BIN
add_linkselfie/__pycache__/nb_protocol.cpython-38.pyc → new_add_linkselfie/__pycache__/nb_protocol.cpython-38.pyc


BIN
add_linkselfie/__pycache__/network.cpython-38.pyc → new_add_linkselfie/__pycache__/network.cpython-38.pyc


BIN
add_linkselfie/__pycache__/utils.cpython-38.pyc → new_add_linkselfie/__pycache__/utils.cpython-38.pyc


+ 69 - 0
new_add_linkselfie/convert.py

@@ -0,0 +1,69 @@
+# convert.py
+import sys, json, pickle, numpy as np, pandas as pd
+from pathlib import Path
+
+def json_safe(o):
+    if isinstance(o, (str, int, float, bool)) or o is None: return o
+    if isinstance(o, (list, tuple, set)): return [json_safe(x) for x in o]
+    if isinstance(o, dict): return {str(k): json_safe(v) for k, v in o.items()}
+    if isinstance(o, pd.DataFrame): return [json_safe(r) for r in o.to_dict(orient="records")]
+    if isinstance(o, pd.Series): return json_safe(o.to_dict())
+    if isinstance(o, np.ndarray): return json_safe(o.tolist())
+    if isinstance(o, (np.integer,)): return int(o)
+    if isinstance(o, (np.floating,)): return float(o)
+    if isinstance(o, (np.bool_,)): return bool(o)
+    return repr(o)
+
+def dict_of_lists_to_records(d):
+    lists = {k:v for k,v in d.items() if isinstance(v, (list, tuple, np.ndarray))}
+    if not lists: return None
+    lens = {len(v) for v in lists.values()}
+    if len(lens) != 1: return None
+    n = next(iter(lens))
+    recs = []
+    for i in range(n):
+        rec = {}
+        for k,v in d.items():
+            rec[k] = v[i] if isinstance(v, (list, tuple, np.ndarray)) else v
+        recs.append(json_safe(rec))
+    return recs
+
+def to_records(obj):
+    if isinstance(obj, pd.DataFrame): return obj.to_dict(orient="records")
+    if isinstance(obj, list) and (len(obj)==0 or isinstance(obj[0], dict)): return obj
+    if isinstance(obj, dict):
+        for k in ("data","results","records","runs","experiments"):
+            if k in obj and isinstance(obj[k], list): return obj[k]
+        recs = dict_of_lists_to_records(obj)
+        if recs is not None: return recs
+        return [json_safe(obj)]
+    return [json_safe(obj)]
+
+def main(src_path, out_dir=None):
+    src = Path(src_path)
+    if not src.exists(): raise FileNotFoundError(src)
+    base_dir = Path(__file__).parent
+    out_dir = Path(out_dir) if out_dir else (base_dir / "outputs")
+    out_dir.mkdir(parents=True, exist_ok=True)
+
+    with src.open("rb") as f: obj = pickle.load(f)
+    safe = json_safe(obj)
+    records = to_records(obj)
+    stem = src.stem
+    out_array  = out_dir / f"{stem}.records.json"
+    out_ndjson = out_dir / f"{stem}.records.ndjson"
+    out_raw    = out_dir / f"{stem}.raw.json"
+
+    with out_array.open("w", encoding="utf-8") as f: json.dump(json_safe(records), f, ensure_ascii=False, indent=2)
+    with out_ndjson.open("w", encoding="utf-8") as f:
+        for rec in records:
+            json.dump(json_safe(rec), f, ensure_ascii=False); f.write("\n")
+    with out_raw.open("w", encoding="utf-8") as f: json.dump(safe, f, ensure_ascii=False, indent=2)
+
+    print("Wrote:\n- {}\n- {}\n- {}".format(out_array, out_ndjson, out_raw))
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("Usage: python convert.py <src.pkl> [out_dir]")
+        sys.exit(1)
+    main(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)

+ 37 - 0
new_add_linkselfie/convert.py~

@@ -0,0 +1,37 @@
+# convert_pickle_to_json.py
+import sys, json, pickle, numpy as np, pandas as pd
+from pathlib import Path
+
+def json_safe(o):
+    if isinstance(o, (str, int, float, bool)) or o is None:
+        return o
+    if isinstance(o, (list, tuple, set)):
+        return [json_safe(x) for x in o]
+    if isinstance(o, dict):
+        return {str(k): json_safe(v) for k,v in o.items()}
+    if isinstance(o, pd.DataFrame):
+        return [json_safe(r) for r in o.to_dict(orient="records")]
+    if isinstance(o, pd.Series):
+        return json_safe(o.to_dict())
+    if isinstance(o, np.ndarray):
+        return json_safe(o.tolist())
+    if isinstance(o, (np.integer,)):
+        return int(o)
+    if isinstance(o, (np.floating,)):
+        return float(o)
+    if isinstance(o, (np.bool_,)):
+        return bool(o)
+    return repr(o)
+
+def main(src, dst):
+    with open(src, "rb") as f:
+        obj = pickle.load(f)
+    safe = json_safe(obj)
+    with open(dst, "w", encoding="utf-8") as f:
+        json.dump(safe, f, ensure_ascii=False, indent=2)
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        print("Usage: python convert_pickle_to_json.py <src.pkl> <dst.json>")
+        sys.exit(1)
+    main(sys.argv[1], sys.argv[2])

+ 27 - 0
add_linkselfie/evalationmemo.txt → new_add_linkselfie/evalationmemo.txt

@@ -58,3 +58,30 @@ pickle は ./outputs/ 以下に保存されるもののみ明記しています
    - 系列: スケジューラごと
    - CI帯: あり (平均 ± 95% CI)
    - Pickle: ./outputs/plot_minwidthsum_perpair_weighted_vs_budget_{NOISE}.pickle
+
+8) plot_importance_discovery_value_vs_budget_<noise>.pdf
+
+
+閾値 y をどのように決定するのか
+- 1 素朴にユーザーが直接指定する。
+  - 例 y = 0.01
+
+- 2 予算Cとdeltaから理論的に導出した固定基準に従って決める。
+  - 総測定予算C 、 どの程度の誤判定率をユーザーが望むのかによって閾値y
+    を決定する
+  - Hoeffding半径について
+  - f(N,delta) = r であらわされる
+    - 入力
+    - サンプル数 N
+      - 平均を計算するために集めたデータの数。
+      - 今回でいうとバウンス数
+    - 許容誤差確率 delta
+      - "どのくらいの確率で誤判定を許すか"を表す値。
+      - delta = 0.01ならば「誤差がこの信頼区間を超える確率は高々1%」という意味。
+    - 出力
+    - Hoeffding半径 r
+      - サンプル平均と真の平均が高々この値以内に収まることを保証する幅。
+    - 意味すること
+      - 真の平均は推定平均の周り±rの区間に含まれる確率が 1 - delta 以上である
+      
+ 

+ 515 - 0
new_add_linkselfie/evaluation.py

@@ -0,0 +1,515 @@
+# evaluation.py — Run shared sweep once; all plots aggregate from cache (reproducible with seed)
+
+import math
+import os
+import pickle
+import time
+import shutil
+import json
+import hashlib
+
+import matplotlib.pyplot as plt
+import numpy as np
+from cycler import cycler
+
+# metrics / viz を外出し(UNIX的分離)
+from metrics.widths import (
+    ci_radius_hoeffding,
+    sum_weighted_widths_all_links,
+    sum_weighted_min_widths_perpair,
+    sum_widths_all_links,
+    sum_minwidths_perpair,
+)
+from viz.plots import mean_ci95, plot_with_ci_band
+
+from network import QuantumNetwork
+from schedulers import run_scheduler  # スケジューラ呼び出し
+
+
+from utils.ids import to_idx0, normalize_to_1origin, is_keys_1origin
+
+from utils.fidelity import (
+    generate_fidelity_list_avg_gap,
+    generate_fidelity_list_fix_gap,
+    generate_fidelity_list_random,
+    _generate_fidelity_list_random_rng,
+)
+
import matplotlib as mpl
# Layout defaults: constrained layout plus a tight bounding box for output files.
mpl.rcParams["figure.constrained_layout.use"] = True
mpl.rcParams["savefig.bbox"] = "tight"   # applied to every savefig

# ---- Matplotlib style (compatibility first: hex colors & safe marker glyphs) ----
mpl.rcParams["font.family"] = "serif"
mpl.rcParams["font.serif"] = [
    "TeX Gyre Termes",
    "Nimbus Roman",
    "Liberation Serif",
    "DejaVu Serif",
]
mpl.rcParams["font.size"] = 20

# Shared prop cycle: color/marker/linestyle advance together per plotted series.
default_cycler = (
    cycler(color=["#4daf4a", "#377eb8", "#e41a1c", "#984ea3", "#ff7f00", "#a65628"])
    + cycler(marker=["s", "v", "o", "x", "*", "+"])
    + cycler(linestyle=[":", "--", "-", "-.", "--", ":"])
)
plt.rc("axes", prop_cycle=default_cycler)
+
+
+# =========================
+# Progress helpers
+# =========================
+def _start_timer():
+    return {"t0": time.time(), "last": time.time()}
+
+def _tick(timer):
+    now = time.time()
+    dt_total = now - timer["t0"]
+    dt_step = now - timer["last"]
+    timer["last"] = now
+    return dt_total, dt_step
+
+def _log(msg):
+    print(msg, flush=True)
+
+# =========================
+# Shared sweep (cache) helpers with file lock
+# =========================
+def _sweep_signature(budget_list, scheduler_names, noise_model,
+                     node_path_list, importance_list, bounces, repeat,
+                     importance_mode="fixed", importance_uniform=(0.0, 1.0), seed=None):
+    payload = {
+        "budget_list": list(budget_list),
+        "scheduler_names": list(scheduler_names),
+        "noise_model": str(noise_model),
+        "node_path_list": list(node_path_list),
+        "importance_list": list(importance_list) if importance_list is not None else None,
+        "importance_mode": str(importance_mode),
+        "importance_uniform": list(importance_uniform) if importance_uniform is not None else None,
+        "bounces": list(bounces),
+        "repeat": int(repeat),
+        "seed": int(seed) if seed is not None else None,
+        "version": 5,  # schema: true_fid_by_path を 1-origin に統一
+    }
+    sig = hashlib.md5(json.dumps(payload, sort_keys=True).encode("utf-8")).hexdigest()[:10]
+    return payload, sig
+
+def _shared_sweep_path(noise_model, sig):
+    root_dir = os.path.dirname(os.path.abspath(__file__))
+    outdir = os.path.join(root_dir, "outputs")
+    os.makedirs(outdir, exist_ok=True)
+    return os.path.join(outdir, f"shared_sweep_{noise_model}_{sig}.pickle")
+
def _run_or_load_shared_sweep(
    budget_list, scheduler_names, noise_model,
    node_path_list, importance_list,
    bounces=(1,2,3,4), repeat=10,
    importance_mode="fixed", importance_uniform=(0.0, 1.0),
    seed=None,
    verbose=True, print_every=1,
):
    """Run the shared budget sweep once and cache it, or load the cached result.

    The cache file name embeds an MD5 signature of every sweep parameter
    (see _sweep_signature), so any input change yields a new cache entry.
    A ``<cache>.lock`` file ensures only one process generates a given cache;
    other processes wait for it and then load the finished file.

    Returns the payload dict:
    {"config": ..., "budget_list": [...],
     "data": {scheduler_name: {budget_index: [run_record, ...]}}}.
    """
    config, sig = _sweep_signature(
        budget_list, scheduler_names, noise_model,
        node_path_list, importance_list, bounces, repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed
    )
    cache_path = _shared_sweep_path(noise_model, sig)
    lock_path  = cache_path + ".lock"
    STALE_LOCK_SECS = 6 * 60 * 60        # reclaim the lock if its mtime is untouched for 6 hours
    HEARTBEAT_EVERY = 5.0                # writer refreshes the lock mtime this often (seconds)

    rng = np.random.default_rng(seed)    # random generator (the core of reproducibility)

    # Fast path: load an existing cache immediately
    if os.path.exists(cache_path):
        if verbose: _log(f"[shared] Load cached sweep: {os.path.basename(cache_path)}")
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    # --- Acquire the lock (only one process performs the initial generation) ---
    got_lock = False
    while True:
        try:
            # O_CREAT|O_EXCL makes lock creation atomic: it fails if the file exists
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            got_lock = True
            break
        except FileExistsError:
            # Another process is generating: wait for completion (no timeout)
            if os.path.exists(cache_path):
                with open(cache_path, "rb") as f:
                    return pickle.load(f)

            # Stale-lock detection: reclaim if the mtime has not moved for a long time
            try:
                age = time.time() - os.path.getmtime(lock_path)
            except OSError:
                age = 0
            if age > STALE_LOCK_SECS:
                if verbose: _log("[shared] Stale lock detected. Removing...")
                try: os.remove(lock_path)
                except FileNotFoundError:
                    pass
                continue

            # Keep waiting for progress
            if verbose: _log("[shared] Waiting for cache to be ready...")
            time.sleep(1.0)

    try:
        if verbose: _log(f"[shared] Run sweep and cache to: {os.path.basename(cache_path)}")

        data = {name: {k: [] for k in range(len(budget_list))} for name in scheduler_names}
        last_hb = time.time()

        # === One repeat = one fixed topology, evaluated across ALL budgets ===
        for r in range(repeat):
            if verbose and ((r + 1) % print_every == 0 or r == 0):
                _log(f"[shared] Repeat {r+1}/{repeat} (fixed topology)")

            # Fixed topology reused throughout this repeat (rng-based sampling)
            fidelity_bank = [_generate_fidelity_list_random_rng(rng, n) for n in node_path_list]

            # importance per repeat (fixed, or uniform sample drawn with rng)
            if str(importance_mode).lower() == "uniform":
                a, b = map(float, importance_uniform)
                imp_list_r = [float(rng.uniform(a, b)) for _ in node_path_list]
            else:
                imp_list_r = list(importance_list)

            def network_generator(path_num, pair_idx):
                return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)

            # Same topology; only the budget varies
            for k, C_total in enumerate(budget_list):
                if verbose:
                    _log(f"=== [SHARED {noise_model}] Budget={C_total} ({k+1}/{len(budget_list)}) ===")

                # Heartbeat (refresh the lock's mtime so it is not treated as stale)
                now = time.time()
                if now - last_hb >= HEARTBEAT_EVERY:
                    try:
                        os.utime(lock_path, None)
                    except FileNotFoundError:
                        pass
                    last_hb = now

                for name in scheduler_names:
                    per_pair_results, total_cost, per_pair_details = run_scheduler(
                        node_path_list=node_path_list, importance_list=imp_list_r,
                        scheduler_name=name,
                        bounces=list(bounces),
                        C_total=int(C_total),
                        network_generator=network_generator,
                        return_details=True,
                    )

                    # --- Inject the true fidelities (true_fid_by_path) into per_pair_details ---
                    # Keys follow est_fid_by_path's scheme (normalized to integers 1..L);
                    # when absent, default to 1..L.
                    for d, det in enumerate(per_pair_details):
                        true_list = fidelity_bank[d]              # 0-origin list of true fidelities
                        est_map = det.get("est_fid_by_path", {})  # expected to use keys {1..L}

                        L = len(true_list)

                        # 1) Normalize the estimate dict to 1-origin (absorbs 0-origin input too)
                        if est_map:
                            est_map_norm = normalize_to_1origin(
                                {int(k): float(v) for k, v in est_map.items()}, L
                            )
                        else:
                            est_map_norm = {}  # unmeasured: keep empty (value side treats it as zero contribution)

                        # 2) Build the truth dict with 1-origin keys (true_list is 0-origin, hence to_idx0)
                        true_map = {pid: float(true_list[to_idx0(pid)]) for pid in range(1, L + 1)}

                        # 3) Strict check (optional, but useful for catching bugs early)
                        if est_map_norm and not is_keys_1origin(est_map_norm.keys(), L):
                            raise RuntimeError(f"[inject] est_fid_by_path keys not 1..{L} (pair={d})")

                        det["est_fid_by_path"]  = est_map_norm
                        det["true_fid_by_path"] = true_map

                    data[name][k].append({
                        "per_pair_results": per_pair_results,
                        "per_pair_details": per_pair_details,
                        "total_cost": total_cost,
                        "importance_list": imp_list_r
                    })

        payload = {"config": config, "budget_list": list(budget_list), "data": data}

        # Atomic write (tmp file + os.replace) so readers never see a partial cache
        tmp = cache_path + ".tmp"
        with open(tmp, "wb") as f:
            pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        os.replace(tmp, cache_path)

        return payload

    finally:
        if got_lock:
            try:
                os.remove(lock_path)
            except FileNotFoundError:
                pass
+
+# =========================
+# 1) Accuracy: 平均 ± 95%CI
+# =========================
def plot_accuracy_vs_budget(
    budget_list, scheduler_names, noise_model,
    node_path_list, importance_list,
    bounces=(1,2,3,4), repeat=10,
    importance_mode="fixed", importance_uniform=(0.0,1.0), seed=None,
    verbose=True, print_every=1,
):
    """Plot average correctness (mean ± 95% CI) versus total budget C.

    One curve per scheduler; data come from the shared sweep cache
    (_run_or_load_shared_sweep), so repeated plotting reuses the same runs.
    Saves outputs/plot_accuracy_vs_budget_<noise_model>.pdf.
    """
    file_name = f"plot_accuracy_vs_budget_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    payload = _run_or_load_shared_sweep(
        budget_list, scheduler_names, noise_model,
        node_path_list, importance_list,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed,
        verbose=verbose, print_every=print_every,
    )

    # Collect: per budget, accumulate each run's accuracy (fraction of correct pairs)
    results = {name: {"accs": [[] for _ in budget_list]} for name in scheduler_names}
    for name in scheduler_names:
        for k in range(len(budget_list)):
            for run in payload["data"][name][k]:
                per_pair_results = run["per_pair_results"]

                # Normalize each per-pair result to 0/1; elements may be
                # tuples (correct, ...) or plain bool/number.
                vals = []
                for r in per_pair_results:
                    if isinstance(r, tuple):
                        c = r[0]
                    elif isinstance(r, (int, float, bool)):
                        c = bool(r)
                    else:
                        raise TypeError(
                            f"per_pair_results element has unexpected type: {type(r)} -> {r}"
                        )
                    vals.append(1.0 if c else 0.0)

                acc = float(np.mean(vals)) if vals else 0.0
                results[name]["accs"][k].append(acc)

    # plot (mean ± 95%CI)
    plt.rc("axes", prop_cycle=default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)
    xs = list(budget_list)

    for name, data in results.items():
        means, halfs = [], []
        for vals in data["accs"]:
            m, h = mean_ci95(vals)  # viz.plots.mean_ci95
            means.append(m); halfs.append(h)
        means = np.asarray(means); halfs = np.asarray(halfs)

        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
        ax.plot(xs, means, linewidth=2.0, label=label)
        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)

    ax.set_xlabel("Total Budget (C)")
    ax.set_ylabel("Average Correctness (mean ± 95% CI)")
    ax.grid(True); ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    fig.savefig(pdf)
    plt.close(fig)  # fix: free the figure — repeated calls otherwise accumulate open figures
    if shutil.which("pdfcrop"):
        # fix: quote the path so filenames/dirs with spaces survive the shell
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    _log(f"Saved: {pdf}")
+
+# =========================
+# 2) Value vs Used(x=実コスト平均, y=Σ_d I_d * true_fid(j*_d) の平均±95%CI)
+#    ※ j*_d は宛先 d における「推定忠実度が最大」のリンク(path_id は 1..L)
+# =========================
def plot_value_vs_used(
    budget_list, scheduler_names, noise_model,
    node_path_list, importance_list,
    bounces=(1,2,3,4), repeat=10, importance_mode="fixed", importance_uniform=(0.0,1.0), seed=None,
    verbose=True, print_every=1,
):
    """Plot importance-weighted value versus actually-used measurement cost.

    x: mean measured cost per budget. y: mean ± 95% CI of
    value = Σ_d I_d · true_fid(j*_d), where j*_d is the link with the highest
    *estimated* fidelity for destination d (path ids 1..L); its *true*
    fidelity contributes. Saves outputs/plot_value_vs_used_<noise_model>.pdf.
    """
    file_name = f"plot_value_vs_used_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    payload = _run_or_load_shared_sweep(
        budget_list, scheduler_names, noise_model,
        node_path_list, importance_list,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed,
        verbose=verbose, print_every=print_every,
    )

    results = {name: {"values": [[] for _ in budget_list], "costs": [[] for _ in budget_list]} for name in scheduler_names}
    for name in scheduler_names:
        for k in range(len(budget_list)):
            for run in payload["data"][name][k]:
                per_pair_details = run["per_pair_details"]
                total_cost = int(run["total_cost"])

                # y: value = Σ_d I_d * true_fid(j*_d)
                #   where j*_d = argmax_l est_fid_by_path[d][l]
                value = 0.0
                I_used = run.get("importance_list", importance_list)

                for d, det in enumerate(per_pair_details):
                    est   = det.get("est_fid_by_path", {})   # {path_id(1..L): estimated_fidelity}
                    true_ = det.get("true_fid_by_path", {})  # {path_id(1..L): true_fidelity}

                    # 1) A missing truth dict is a configuration mismatch -> explicit error
                    if not true_:
                        raise RuntimeError(f"[value] true_fid_by_path missing for pair {d}")

                    # 2) If even one link has an estimate, take the current best j*
                    #    and score its *true* fidelity
                    if est:
                        j_star = max(est, key=lambda pid: float(est.get(pid, 0.0)))
                        if j_star not in true_:
                            raise RuntimeError(
                                f"[value] true_fid_by_path lacks j* (pair={d}, j*={j_star})."
                            )
                        best_true = float(true_[j_star])
                    else:
                        # No estimates at all -> zero contribution (as before)
                        best_true = 0.0

                    I = float(I_used[d]) if d < len(I_used) else 1.0
                    value += I * best_true

                results[name]["values"][k].append(float(value))
                results[name]["costs"][k].append(total_cost)

    # plot (95% CI band on y)
    plt.rc("axes", prop_cycle=default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)

    for name, dat in results.items():
        # x is the mean used cost at each budget
        x_means = [float(np.mean(v)) if v else 0.0 for v in dat["costs"]]
        # y is the mean value (defined above) ± 95% CI at each budget
        y_means, y_halfs = [], []
        for vals in dat["values"]:
            m, h = mean_ci95(vals)  # viz.plots.mean_ci95
            y_means.append(float(m))
            y_halfs.append(float(h))

        x_means = np.asarray(x_means)
        y_means = np.asarray(y_means)
        y_halfs = np.asarray(y_halfs)

        label = name.replace("Vanilla NB", "VanillaNB").replace("Succ. Elim. NB", "SuccElimNB")
        ax.plot(x_means, y_means, linewidth=2.0, marker="o", label=label)
        ax.fill_between(x_means, y_means - y_halfs, y_means + y_halfs, alpha=0.25)

    ax.set_xlabel("Total Measured Cost (used)")
    ax.set_ylabel("Σ_d I_d · true_fid(j*_d) (mean ± 95% CI)")
    ax.grid(True); ax.legend(title="Scheduler")

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    fig.savefig(pdf)
    plt.close(fig)  # fix: free the figure — repeated calls otherwise accumulate open figures
    if shutil.which("pdfcrop"):
        # fix: quote the path so filenames/dirs with spaces survive the shell
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    _log(f"Saved: {pdf}")
+
+
def plot_value_vs_budget(
    budget_list, scheduler_names, noise_model,
    node_path_list, importance_list,
    bounces=(1,2,3,4), repeat=10, importance_mode="fixed", importance_uniform=(0.0,1.0), seed=None,
    verbose=True, print_every=1,
):
    """Plot x = allocated budget (budget_list), y = mean ± 95% CI of
    Σ_d I_d * true_fid(j*_d).

    j*_d is the link with the highest *estimated* fidelity at that point;
    if even one link has an estimate, that j* is used (unmeasured pairs
    contribute zero). Output: outputs/plot_value_vs_budget_{noise_model}.pdf
    """
    file_name = f"plot_value_vs_budget_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    # Run or load the shared sweep cache
    payload = _run_or_load_shared_sweep(
        budget_list, scheduler_names, noise_model,
        node_path_list, importance_list,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed,
        verbose=verbose, print_every=print_every,
    )

    # Accumulate value and (for reference) used cost per scheduler and budget
    results = {name: {"values": [[] for _ in budget_list], "costs": [[] for _ in budget_list]} for name in scheduler_names}

    for name in scheduler_names:
        for k in range(len(budget_list)):
            for run in payload["data"][name][k]:
                per_pair_details = run["per_pair_details"]
                total_cost = int(run["total_cost"])  # reference only (not used as x here)
                I_used = run.get("importance_list", importance_list)

                # y: value = Σ_d I_d * true_fid(j*_d)
                # j*_d = argmax_l est_fid_by_path[d][l] (any available estimate qualifies)
                value = 0.0
                for d, det in enumerate(per_pair_details):
                    est   = det.get("est_fid_by_path", {})   # {path_id(1..L): est}
                    true_ = det.get("true_fid_by_path", {})  # {path_id(1..L): true}

                    # A missing truth dict is a configuration mismatch
                    if not true_:
                        raise RuntimeError(f"[value] true_fid_by_path missing for pair {d}")

                    # Use the *true* fidelity of the currently-estimated best link
                    if est:
                        j_star = max(est, key=lambda pid: float(est.get(pid, 0.0)))
                        if j_star not in true_:
                            raise RuntimeError(f"[value] true_fid_by_path lacks j* (pair={d}, j*={j_star}).")
                        best_true = float(true_[j_star])
                    else:
                        # No estimates at all -> zero contribution
                        best_true = 0.0

                    I = float(I_used[d]) if d < len(I_used) else 1.0
                    value += I * best_true

                results[name]["values"][k].append(float(value))
                results[name]["costs"][k].append(total_cost)  # kept, but not plotted

    # === Plot (x: allocated budget = budget_list, y: mean value ± 95% CI) ===
    plt.rc("axes", prop_cycle=default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)

    x_vals = np.asarray(list(budget_list), dtype=float)  # x axis is the allocated budget

    for name, dat in results.items():
        y_means, y_halfs = [], []
        for vals in dat["values"]:
            m, h = mean_ci95(vals)
            y_means.append(float(m))
            y_halfs.append(float(h))

        y_means = np.asarray(y_means)
        y_halfs = np.asarray(y_halfs)

        label = name.replace("Vanilla NB", "VanillaNB").replace("Succ. Elim. NB", "SuccElimNB")
        ax.plot(x_vals, y_means, linewidth=2.0, marker="o", label=label)
        ax.fill_between(x_vals, y_means - y_halfs, y_means + y_halfs, alpha=0.25)

    ax.set_xlabel("Total Budget (C)")
    ax.set_ylabel("Σ_d I_d · true_fid(j*_d) (mean ± 95% CI)")
    ax.grid(True); ax.legend(title="Scheduler")

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    fig.savefig(pdf)
    plt.close(fig)  # fix: free the figure — repeated calls otherwise accumulate open figures
    if shutil.which("pdfcrop"):
        # fix: quote the path so filenames/dirs with spaces survive the shell
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    _log(f"Saved: {pdf}")

+ 420 - 0
new_add_linkselfie/evaluationgap.py

@@ -0,0 +1,420 @@
+# evaluationgap.py — Gap sweep: x = gap, y = accuracy (mean ± 95% CI)
+# Supports:
+#   (2a) Random gap mode   : alpha = alpha_base, beta = alpha - gap, then random sampling (utils.fidelity)
+#   (2b) Fixed  gap mode   : deterministic arithmetic sequence with gap       (utils.fidelity)
+#
+# Both modes inject true_fid_by_path with 1-origin keys and normalize est_fid_by_path to 1-origin.
+
+import os
+import json
+import time
+import pickle
+import hashlib
+import shutil
+from typing import List, Sequence, Dict, Any, Tuple
+
+import numpy as np
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+from cycler import cycler
+
+from network import QuantumNetwork
+from schedulers import run_scheduler
+from viz.plots import mean_ci95
+
+from utils.ids import to_idx0, normalize_to_1origin, is_keys_1origin
+from utils.fidelity import (
+    generate_fidelity_list_fix_gap,
+    _generate_fidelity_list_random_rng,
+)
+
# ---- Matplotlib style (align with evaluation.py) ----
mpl.rcParams["figure.constrained_layout.use"] = True
mpl.rcParams["savefig.bbox"] = "tight"
mpl.rcParams["font.family"] = "serif"
mpl.rcParams["font.serif"] = [
    "TeX Gyre Termes",
    "Nimbus Roman",
    "Liberation Serif",
    "DejaVu Serif",
]
mpl.rcParams["font.size"] = 20

# Same prop cycle as evaluation.py so curves are visually comparable across plots.
_default_cycler = (
    cycler(color=["#4daf4a", "#377eb8", "#e41a1c", "#984ea3", "#ff7f00", "#a65628"])
    + cycler(marker=["s", "v", "o", "x", "*", "+"])
    + cycler(linestyle=[":", "--", "-", "-.", "--", ":"])
)
plt.rc("axes", prop_cycle=_default_cycler)
+
+
+# -----------------------------
+# Cache helpers (gap sweep)
+# -----------------------------
+def _gap_sweep_signature(gap_list: Sequence[float], scheduler_names: Sequence[str], noise_model: str,
+                         node_path_list: Sequence[int], importance_list: Sequence[float],
+                         bounces: Sequence[int], repeat: int,
+                         mode: str,   # "random" or "fixed"
+                         importance_mode: str = "fixed", importance_uniform: Tuple[float, float] = (0.0, 1.0),
+                         seed: int = None, alpha_base: float = 0.95, variance: float = 0.10,
+                         C_total: int = 5000) -> Tuple[Dict[str, Any], str]:
+    payload = {
+        "gap_list": list(map(float, gap_list)),
+        "scheduler_names": list(scheduler_names),
+        "noise_model": str(noise_model),
+        "node_path_list": list(map(int, node_path_list)),
+        "importance_list": list(importance_list) if importance_list is not None else None,
+        "importance_mode": str(importance_mode),
+        "importance_uniform": list(importance_uniform) if importance_uniform is not None else None,
+        "bounces": list(map(int, bounces)),
+        "repeat": int(repeat),
+        "seed": int(seed) if seed is not None else None,
+        "mode": str(mode),  # "random" / "fixed"
+        "alpha_base": float(alpha_base),
+        "variance": float(variance),
+        "C_total": int(C_total),
+        "version": 4,  # schema: 1-origin injection & normalized est keys; fidelity_bank per gap stored
+    }
+    sig = hashlib.md5(json.dumps(payload, sort_keys=True).encode("utf-8")).hexdigest()[:10]
+    return payload, sig
+
+
+def _shared_gap_path(noise_model: str, sig: str) -> str:
+    root_dir = os.path.dirname(os.path.abspath(__file__))
+    outdir = os.path.join(root_dir, "outputs")
+    os.makedirs(outdir, exist_ok=True)
+    return os.path.join(outdir, f"shared_gap_{noise_model}_{sig}.pickle")
+
+
def _run_or_load_shared_gap_sweep(
    gap_list: Sequence[float], scheduler_names: Sequence[str], noise_model: str,
    node_path_list: Sequence[int], importance_list: Sequence[float],
    bounces=(1, 2, 3, 4), repeat: int = 10,
    importance_mode: str = "fixed", importance_uniform: Tuple[float, float] = (0.0, 1.0),
    seed: int = None, alpha_base: float = 0.95, variance: float = 0.10,
    C_total: int = 5000, mode: str = "random",
    verbose: bool = True, print_every: int = 1,
) -> Dict[str, Any]:
    """
    For each gap in gap_list, run `repeat` times. For each (gap, repeat) we create ONE fidelity_bank
    and reuse it for:
      - network generation (per pair)
      - true_fid_by_path injection
    so that there is no re-sampling mismatch.

    Results are cached under outputs/ keyed by an MD5 signature of the inputs
    (see _gap_sweep_signature); a ``<cache>.lock`` file ensures a single writer.
    Returns {"config": ..., "gap_list": [...],
             "data": {scheduler_name: {gap_index: [run_record, ...]}}}.
    """
    config, sig = _gap_sweep_signature(
        gap_list, scheduler_names, noise_model,
        node_path_list, importance_list, bounces, repeat,
        mode=mode,
        importance_mode=importance_mode, importance_uniform=importance_uniform,
        seed=seed, alpha_base=alpha_base, variance=variance, C_total=C_total
    )
    cache_path = _shared_gap_path(noise_model, sig)
    lock_path = cache_path + ".lock"
    STALE_LOCK_SECS = 6 * 60 * 60   # reclaim the lock if its mtime is untouched for 6 hours
    HEARTBEAT_EVERY = 5.0           # writer refreshes the lock mtime this often (seconds)

    rng = np.random.default_rng(seed)

    # Fast path: cached
    if os.path.exists(cache_path):
        if verbose:
            print(f"[gap-shared] Load cached: {os.path.basename(cache_path)}", flush=True)
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    # Lock acquisition (single writer)
    got_lock = False
    while True:
        try:
            # O_CREAT|O_EXCL makes lock creation atomic: it fails if the file exists
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            got_lock = True
            break
        except FileExistsError:
            if os.path.exists(cache_path):
                with open(cache_path, "rb") as f:
                    return pickle.load(f)
            # Stale-lock detection: reclaim if the mtime has not moved for a long time
            try:
                age = time.time() - os.path.getmtime(lock_path)
            except OSError:
                age = 0
            if age > STALE_LOCK_SECS:
                try: os.remove(lock_path)
                except FileNotFoundError: pass
                continue
            if verbose:
                print("[gap-shared] Waiting for cache to be ready...", flush=True)
            time.sleep(1.0)

    try:
        if verbose:
            print(f"[gap-shared] Run gap sweep and cache to: {os.path.basename(cache_path)}", flush=True)

        data = {name: {k: [] for k in range(len(gap_list))} for name in scheduler_names}
        last_hb = time.time()

        for r in range(repeat):
            if verbose and ((r + 1) % print_every == 0 or r == 0):
                print(f"[gap-shared] Repeat {r+1}/{repeat}", flush=True)

            # Importance per repeat
            if str(importance_mode).lower() == "uniform":
                a, b = map(float, importance_uniform)
                imp_list_r = [float(rng.uniform(a, b)) for _ in node_path_list]
            else:
                imp_list_r = list(importance_list)

            # Sweep gaps
            for k, gap in enumerate(gap_list):
                if verbose:
                    print(f"=== [GAP {noise_model}] gap={gap} ({k+1}/{len(gap_list)}), mode={mode} ===", flush=True)

                # Heartbeat (refresh the lock's mtime so it is not treated as stale)
                now = time.time()
                if now - last_hb >= HEARTBEAT_EVERY:
                    try: os.utime(lock_path, None)
                    except FileNotFoundError: pass
                    last_hb = now

                # (Important) build and store the fidelity_bank per (gap, repeat)
                # first, then reuse it everywhere — no re-sampling mismatch
                fidelity_bank: List[List[float]] = []
                for pair_idx, path_num in enumerate(node_path_list):
                    if mode == "fixed":
                        # Arithmetic sequence: step down from fidelity_max by gap
                        fids = generate_fidelity_list_fix_gap(
                            path_num=int(path_num), gap=float(gap), fidelity_max=1.0
                        )
                    else:
                        # Random: alpha=alpha_base, beta=alpha_base-gap
                        alpha = float(alpha_base)
                        beta = float(alpha_base) - float(gap)
                        fids = _generate_fidelity_list_random_rng(
                            rng=rng, path_num=int(path_num),
                            alpha=alpha, beta=beta, variance=float(variance)
                        )
                    fidelity_bank.append(fids)

                # network generator uses the saved bank
                def network_generator(path_num: int, pair_idx: int):
                    return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)

                for name in scheduler_names:
                    per_pair_results, total_cost, per_pair_details = run_scheduler(
                        node_path_list=node_path_list, importance_list=imp_list_r,
                        scheduler_name=name,
                        bounces=list(bounces),
                        C_total=int(C_total),
                        network_generator=network_generator,
                        return_details=True,
                    )

                    # Inject truth (1..L) and normalize estimated map (to 1..L)
                    for d, det in enumerate(per_pair_details):
                        L = int(node_path_list[d])
                        est_map = det.get("est_fid_by_path", {})

                        if est_map:
                            est_map_norm = normalize_to_1origin({int(k): float(v) for k, v in est_map.items()}, L)
                        else:
                            est_map_norm = {}

                        # true map from the saved fidelity_bank (no re-sampling)
                        true_list = fidelity_bank[d]  # 0-origin
                        true_map = {pid: float(true_list[to_idx0(pid)]) for pid in range(1, L + 1)}

                        if est_map_norm and not is_keys_1origin(est_map_norm.keys(), L):
                            raise RuntimeError(f"[inject] est_fid_by_path keys not 1..{L} (pair={d})")

                        det["est_fid_by_path"]  = est_map_norm
                        det["true_fid_by_path"] = true_map

                    data[name][k].append({
                        "per_pair_results": per_pair_results,
                        "per_pair_details": per_pair_details,
                        "total_cost": total_cost,
                        "importance_list": imp_list_r,
                        "gap": float(gap),
                        "C_total": int(C_total),
                        "alpha_base": float(alpha_base),
                        "variance": float(variance),
                        "mode": str(mode),
                        "node_path_list": list(map(int, node_path_list)),
                    })

        payload = {
            "config": config,
            "gap_list": list(map(float, gap_list)),
            "data": data,
        }

        # atomic write (tmp file + os.replace) so readers never see a partial cache
        tmp = cache_path + ".tmp"
        with open(tmp, "wb") as f:
            pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        os.replace(tmp, cache_path)

        return payload

    finally:
        if got_lock:
            try: os.remove(lock_path)
            except FileNotFoundError: pass
+
+
+# -----------------------------
+# Public APIs
+# -----------------------------
def plot_accuracy_vs_gap(
    gap_list: Sequence[float], scheduler_names: Sequence[str], noise_model: str,
    node_path_list: Sequence[int], importance_list: Sequence[float],
    bounces=(1, 2, 3, 4), repeat: int = 10,
    importance_mode: str = "fixed", importance_uniform: Tuple[float, float] = (0.0, 1.0),
    seed: int = None, alpha_base: float = 0.95, variance: float = 0.10,
    C_total_override: int = None,
    verbose: bool = True, print_every: int = 1,
) -> str:
    """
    (2a) Gap vs Accuracy — Random mode (utils.fidelity)

    Runs (or loads from cache) a gap sweep where the best link's mean fidelity
    is ``alpha_base`` and every other link's mean is ``alpha_base - gap``, then
    plots mean accuracy with a 95% CI band per scheduler.

    Returns the path of the saved PDF.
    """
    file_name = f"plot_accuracy_vs_gap_random_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    # Per-gap-point measurement budget; 5000 unless the caller overrides it.
    C_total = int(C_total_override) if C_total_override is not None else 5000

    # Heavy lifting (simulation + caching) happens here; mode="random" selects
    # the randomized fidelity generator in the shared sweep.
    payload = _run_or_load_shared_gap_sweep(
        gap_list, scheduler_names, noise_model,
        node_path_list, importance_list,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform,
        seed=seed, alpha_base=alpha_base, variance=variance,
        C_total=C_total, mode="random",
        verbose=verbose, print_every=print_every,
    )

    # Collect accuracy arrays per gap
    # Each run contributes one accuracy value = fraction of pairs answered
    # correctly; per_pair_results elements may be tuples (correct, ...) or
    # plain booleans/numbers.
    results = {name: {"accs": [[] for _ in gap_list]} for name in scheduler_names}
    for name in scheduler_names:
        for k in range(len(gap_list)):
            for run in payload["data"][name][k]:
                per_pair_results = run["per_pair_results"]
                vals = []
                for r in per_pair_results:
                    if isinstance(r, tuple):
                        c = r[0]
                    elif isinstance(r, (int, float, bool)):
                        c = bool(r)
                    else:
                        raise TypeError(f"per_pair_results element has unexpected type: {type(r)} -> {r}")
                    vals.append(1.0 if c else 0.0)
                acc = float(np.mean(vals)) if vals else 0.0
                results[name]["accs"][k].append(acc)

    # Plot
    plt.rc("axes", prop_cycle=_default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)
    xs = list(map(float, gap_list))

    for name, data in results.items():
        means, halfs = [], []
        for vals in data["accs"]:
            # mean_ci95 returns (mean, half-width of 95% CI) over the repeats
            m, h = mean_ci95(vals)
            means.append(m); halfs.append(h)
        means = np.asarray(means); halfs = np.asarray(halfs)

        # Shorten legend labels to keep the legend box compact.
        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
        ax.plot(xs, means, linewidth=2.0, label=label)
        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)

    ax.set_xlabel("Gap (alpha - beta)")
    ax.set_ylabel("Average Correctness (mean ± 95% CI)")
    ax.grid(True); ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    plt.savefig(pdf)
    # Trim whitespace margins if the external pdfcrop tool is available.
    if shutil.which("pdfcrop"):
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    print(f"Saved: {pdf}", flush=True)
    return pdf
+
+
def plot_accuracy_vs_gap_fixgap(
    gap_list: Sequence[float], scheduler_names: Sequence[str], noise_model: str,
    node_path_list: Sequence[int], importance_list: Sequence[float],
    bounces=(1, 2, 3, 4), repeat: int = 10,
    importance_mode: str = "fixed", importance_uniform: Tuple[float, float] = (0.0, 1.0),
    seed: int = None, fidelity_max: float = 1.0,
    C_total_override: int = None,
    verbose: bool = True, print_every: int = 1,
) -> str:
    """
    (2b) Gap vs Accuracy — Fixed arithmetic sequence mode (utils.fidelity)

    Same pipeline as plot_accuracy_vs_gap but with mode="fixed": fidelities
    form a deterministic arithmetic sequence rather than random draws.
    Returns the path of the saved PDF.

    NOTE(review): `fidelity_max` is currently unused in this body — confirm
    whether the fixed-sequence generator is meant to consume it.
    """
    # The fixed sequence does not use the rng, but `seed` is still forwarded so
    # the cache signature stays reproducible.
    file_name = f"plot_accuracy_vs_gap_fixed_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    # alpha_base/variance are unused in fixed mode; defaults are passed only to
    # keep the shared-sweep signature consistent.
    C_total = int(C_total_override) if C_total_override is not None else 5000

    payload = _run_or_load_shared_gap_sweep(
        gap_list, scheduler_names, noise_model,
        node_path_list, importance_list,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform,
        seed=seed, alpha_base=0.95, variance=0.10,
        C_total=C_total, mode="fixed",
        verbose=verbose, print_every=print_every,
    )

    # Collect accuracy arrays per gap
    results = {name: {"accs": [[] for _ in gap_list]} for name in scheduler_names}
    for name in scheduler_names:
        for k in range(len(gap_list)):
            for run in payload["data"][name][k]:
                per_pair_results = run["per_pair_results"]
                vals = []
                for r in per_pair_results:
                    if isinstance(r, tuple):
                        c = r[0]
                    elif isinstance(r, (int, float, bool)):
                        c = bool(r)
                    else:
                        raise TypeError(f"per_pair_results element has unexpected type: {type(r)} -> {r}")
                    vals.append(1.0 if c else 0.0)
                acc = float(np.mean(vals)) if vals else 0.0
                results[name]["accs"][k].append(acc)

    # Plot
    plt.rc("axes", prop_cycle=_default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)
    xs = list(map(float, gap_list))

    for name, data in results.items():
        means, halfs = [], []
        for vals in data["accs"]:
            # mean and 95% CI half-width over the repeats at this gap
            m, h = mean_ci95(vals)
            means.append(m)
            halfs.append(h)
        means = np.asarray(means)
        halfs = np.asarray(halfs)

        label = name.replace("Vanilla NB", "VanillaNB").replace("Succ. Elim. NB", "SuccElimNB")
        ax.plot(xs, means, linewidth=2.0, label=label)
        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)

    ax.set_xlabel("Gap (arithmetic sequence)")
    ax.set_ylabel("Average Correctness (mean ± 95% CI)")
    ax.grid(True)
    ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    plt.savefig(pdf)
    # Optional margin trim via external pdfcrop.
    if shutil.which("pdfcrop"):
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    print(f"Saved: {pdf}", flush=True)
    return pdf

+ 357 - 0
new_add_linkselfie/evaluationgap.py~

@@ -0,0 +1,357 @@
+
+# evaluationgap.py — Sweep x-axis over "gap", y-axis = accuracy (mean ± 95% CI)
+# Random fidelity generator version where alpha - beta = gap.
+import os
+import json
+import time
+import pickle
+import hashlib
+import shutil
+
+import numpy as np
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+from cycler import cycler
+
+from network import QuantumNetwork
+from schedulers import run_scheduler
+from viz.plots import mean_ci95
+
# ---- Matplotlib global style (match evaluation.py) ----
# Applied at import time so every figure from this module matches evaluation.py.
mpl.rcParams["figure.constrained_layout.use"] = True
mpl.rcParams["savefig.bbox"] = "tight"
mpl.rcParams["font.family"] = "serif"
# Fallback chain: the first serif font installed on the machine wins.
mpl.rcParams["font.serif"] = [
    "TeX Gyre Termes",
    "Nimbus Roman",
    "Liberation Serif",
    "DejaVu Serif",
]
mpl.rcParams["font.size"] = 20

# One entry per plotted scheduler: color, marker and linestyle cycle together,
# so all three cyclers must have the same length (6 here).
default_cycler = (
    cycler(color=["#4daf4a", "#377eb8", "#e41a1c", "#984ea3", "#ff7f00", "#a65628"])
    + cycler(marker=["s", "v", "o", "x", "*", "+"])
    + cycler(linestyle=[":", "--", "-", "-.", "--", ":"])
)
plt.rc("axes", prop_cycle=default_cycler)
+
+
+# -----------------------------
+# Random fidelity generators (alpha - beta = gap)
+# -----------------------------
+def _generate_fidelity_list_random_rng(rng, path_num, alpha=0.95, beta=0.85, variance=0.1):
+    """
+    Generate `path_num` fidelities with top-1 mean alpha and others mean beta,
+    each sampled from Normal(mu, variance), clamped to [0.8, 1.0].
+    Ensures a visible top-1 gap (>0.02) in the sorted list.
+    """
+    while True:
+        mean = [alpha] + [beta] * (path_num - 1)
+        res = []
+        for mu in mean:
+            # Rejection sample into [0.8, 1.0]
+            while True:
+                r = rng.normal(mu, variance)
+                if 0.8 <= r <= 1.0:
+                    break
+            res.append(float(r))
+        sorted_res = sorted(res, reverse=True)
+        if len(sorted_res) >= 2 and (sorted_res[0] - sorted_res[1]) > 0.02:
+            return res
+
+
def _fidelity_list_gap_random(path_num, gap, rng,
                              alpha_base=0.95, variance=0.1):
    """
    Sample `path_num` fidelities whose best/rest means differ by `gap`.

    The top link targets mean ``alpha_base``; every other link targets
    ``alpha_base - gap``. The lower mean is clipped into [0.8, alpha) so the
    jittered samples stay inside the generator's [0.8, 1.0] clamp window.
    """
    top_mean = float(alpha_base)
    rest_mean = float(alpha_base - gap)
    # Clip the lower mean: never below 0.8, never at/above the top mean.
    upper_cap = max(top_mean - 1e-6, 0.8)
    rest_mean = min(max(rest_mean, 0.8), upper_cap)
    return _generate_fidelity_list_random_rng(
        rng, path_num, alpha=top_mean, beta=rest_mean, variance=variance
    )
+
+
+# -----------------------------
+# Cache helpers (gap sweep)
+# -----------------------------
+def _gap_sweep_signature(gap_list, scheduler_names, noise_model,
+                         node_path_list, importance_list, bounces, repeat,
+                         importance_mode="fixed", importance_uniform=(0.0, 1.0), seed=None,
+                         alpha_base=0.95, variance=0.10):
+    payload = {
+        "gap_list": list(map(float, gap_list)),
+        "scheduler_names": list(scheduler_names),
+        "noise_model": str(noise_model),
+        "node_path_list": list(node_path_list),
+        "importance_list": list(importance_list) if importance_list is not None else None,
+        "importance_mode": str(importance_mode),
+        "importance_uniform": list(importance_uniform) if importance_uniform is not None else None,
+        "bounces": list(bounces),
+        "repeat": int(repeat),
+        "seed": int(seed) if seed is not None else None,
+        # fidelity-generation mode & params
+        "fidelity_mode": "random_gap_alpha_beta",
+        "alpha_base": float(alpha_base),
+        "variance": float(variance),
+        "version": 2,
+    }
+    sig = hashlib.md5(json.dumps(payload, sort_keys=True).encode("utf-8")).hexdigest()[:10]
+    return payload, sig
+
+
+def _shared_gap_path(noise_model, sig):
+    root_dir = os.path.dirname(os.path.abspath(__file__))
+    outdir = os.path.join(root_dir, "outputs")
+    os.makedirs(outdir, exist_ok=True)
+    return os.path.join(outdir, f"shared_gap_{noise_model}_{sig}.pickle")
+
+
def _run_or_load_shared_gap_sweep(
    gap_list, scheduler_names, noise_model,
    node_path_list, importance_list,
    bounces=(1, 2, 3, 4), repeat=10,
    importance_mode="fixed", importance_uniform=(0.0, 1.0),
    seed=None, alpha_base=0.95, variance=0.10,
    C_total=5000,
    verbose=True, print_every=1,
):
    """
    For each gap in gap_list, run `repeat` times over the same topology generator (per-repeat),
    and evaluate every scheduler. Cache the whole sweep with a single file lock.

    Concurrency model: the first process to create `<cache>.lock` (O_CREAT|O_EXCL)
    becomes the producer; others poll until the cache pickle appears. Locks older
    than STALE_LOCK_SECS are considered abandoned and removed; the producer
    touches the lock's mtime as a heartbeat while it works.

    Returns the cached/produced payload dict: {"config", "gap_list", "data"}.
    """
    config, sig = _gap_sweep_signature(
        gap_list, scheduler_names, noise_model,
        node_path_list, importance_list, bounces, repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed,
        alpha_base=alpha_base, variance=variance,
    )
    cache_path = _shared_gap_path(noise_model, sig)
    lock_path = cache_path + ".lock"
    STALE_LOCK_SECS = 6 * 60 * 60   # consider a lock dead after 6 hours
    HEARTBEAT_EVERY = 5.0           # seconds between lock mtime refreshes

    rng = np.random.default_rng(seed)

    # Fast path: cached
    if os.path.exists(cache_path):
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    # Lock acquisition loop
    got_lock = False
    while True:
        try:
            # O_EXCL makes creation atomic: exactly one process wins the lock.
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            got_lock = True
            break
        except FileExistsError:
            # If cache appeared while waiting, load immediately.
            if os.path.exists(cache_path):
                with open(cache_path, "rb") as f:
                    return pickle.load(f)
            try:
                age = time.time() - os.path.getmtime(lock_path)
            except OSError:
                # Lock vanished between checks; treat as fresh and retry.
                age = 0
            if age > STALE_LOCK_SECS:
                try:
                    os.remove(lock_path)
                except FileNotFoundError:
                    pass
                continue
            time.sleep(1.0)

    try:
        if verbose:
            print(f"[gap-shared] Run gap sweep and cache to: {os.path.basename(cache_path)}", flush=True)

        # data[scheduler][gap_index] -> list of per-repeat run records
        data = {name: {k: [] for k in range(len(gap_list))} for name in scheduler_names}
        last_hb = time.time()

        # Repeat loop: per-repeat we will re-sample importance (if requested)
        for r in range(repeat):
            if verbose and ((r + 1) % print_every == 0 or r == 0):
                print(f"[gap-shared] Repeat {r+1}/{repeat}", flush=True)

            # Importance list per repeat
            if str(importance_mode).lower() == "uniform":
                a, b = map(float, importance_uniform)
                imp_list_r = [float(rng.uniform(a, b)) for _ in node_path_list]
            else:
                imp_list_r = list(importance_list)

            # Sweep over gaps
            for k, gap in enumerate(gap_list):
                if verbose:
                    print(f"=== [GAP {noise_model}] gap={gap} ({k+1}/{len(gap_list)}) ===", flush=True)

                # Heartbeat: refresh lock mtime so waiters don't declare it stale.
                now = time.time()
                if now - last_hb >= HEARTBEAT_EVERY:
                    try:
                        os.utime(lock_path, None)
                    except FileNotFoundError:
                        pass
                    last_hb = now

                # Network generator for this 'gap' (fresh fidelities for each pair)
                def network_generator(path_num, pair_idx):
                    fids = _fidelity_list_gap_random(
                        path_num=path_num,
                        gap=float(gap),
                        rng=rng,
                        alpha_base=alpha_base,
                        variance=variance,
                    )
                    return QuantumNetwork(path_num, fids, noise_model)

                for name in scheduler_names:
                    per_pair_results, total_cost, per_pair_details = run_scheduler(
                        node_path_list=node_path_list, importance_list=imp_list_r,
                        scheduler_name=name,
                        bounces=list(bounces),
                        C_total=int(C_total),
                        network_generator=network_generator,
                        return_details=True,
                    )
                    data[name][k].append({
                        "per_pair_results": per_pair_results,
                        "per_pair_details": per_pair_details,
                        "total_cost": total_cost,
                        "importance_list": imp_list_r,
                        "gap": float(gap),
                        "C_total": int(C_total),
                        "alpha_base": float(alpha_base),
                        "variance": float(variance),
                    })

        payload = {
            "config": config,
            "gap_list": list(map(float, gap_list)),
            "data": data,
        }

        # Atomic publish: write to a temp file, then rename over the cache path.
        tmp = cache_path + ".tmp"
        with open(tmp, "wb") as f:
            pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        os.replace(tmp, cache_path)

        return payload

    finally:
        # Release the lock even on failure so other processes are not blocked.
        if got_lock:
            try:
                os.remove(lock_path)
            except FileNotFoundError:
                pass
+
+
+# -----------------------------
+# Public API: plot (mean ± 95% CI)
+# -----------------------------
def plot_accuracy_vs_gap(
    gap_list, scheduler_names, noise_model,
    node_path_list, importance_list,
    bounces=(1, 2, 3, 4), repeat=10,
    importance_mode="fixed", importance_uniform=(0.0, 1.0),
    seed=None,
    alpha_base=0.95, variance=0.10,
    C_total_override=None,
    verbose=True, print_every=1,
):
    """
    Make a plot with x = gap, y = accuracy (mean ± 95% CI).
    Uses alpha - beta = gap; fidelities are sampled per pair from Normal(mu, variance) clamped to [0.8,1.0].

    Returns the path of the saved PDF.
    """
    file_name = f"plot_accuracy_vs_gap_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    # Budget per gap point; default 5000 unless overridden by the caller.
    C_total = int(C_total_override) if C_total_override is not None else 5000

    # Run the sweep once (or load it from the shared pickle cache).
    payload = _run_or_load_shared_gap_sweep(
        gap_list, scheduler_names, noise_model,
        node_path_list, importance_list,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed,
        alpha_base=alpha_base, variance=variance,
        C_total=C_total,
        verbose=verbose, print_every=print_every,
    )

    # Collect accuracy arrays per gap
    results = {name: {"accs": [[] for _ in gap_list]} for name in scheduler_names}
    for name in scheduler_names:
        for k in range(len(gap_list)):
            for run in payload["data"][name][k]:
                per_pair_results = run["per_pair_results"]
                vals = []
                # Elements may be (correct, ...) tuples or scalar truth values.
                for r in per_pair_results:
                    if isinstance(r, tuple):
                        c = r[0]
                    elif isinstance(r, (int, float, bool)):
                        c = bool(r)
                    else:
                        raise TypeError(f"per_pair_results element has unexpected type: {type(r)} -> {r}")
                    vals.append(1.0 if c else 0.0)

                acc = float(np.mean(vals)) if vals else 0.0
                results[name]["accs"][k].append(acc)

    # Plot
    plt.rc("axes", prop_cycle=default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)
    xs = list(map(float, gap_list))

    for name, data in results.items():
        means, halfs = [], []
        for vals in data["accs"]:
            # mean and 95% CI half-width over repeats
            m, h = mean_ci95(vals)
            means.append(m); halfs.append(h)
        means = np.asarray(means); halfs = np.asarray(halfs)

        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
        ax.plot(xs, means, linewidth=2.0, label=label)
        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)

    ax.set_xlabel("Gap (alpha - beta)")
    ax.set_ylabel("Average Correctness (mean ± 95% CI)")
    ax.grid(True); ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    plt.savefig(pdf)
    # Optional margin trim via the external pdfcrop tool.
    if shutil.which("pdfcrop"):
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    print(f"Saved: {pdf}", flush=True)
    return pdf
+
+
+if __name__ == "__main__":
+    # Minimal example (safe defaults). Adjust as needed.
+    gaps = [0.005, 0.01, 0.02, 0.03]
+    scheds = ["Vanilla NB", "Succ. Elim. NB", "Greedy Two-Phase"]
+    noise = "Depolar"
+    node_paths = [5, 5, 5]   # 3 pairs, each with 5 candidate links
+    importances = [1.0, 1.0, 1.0]
+
+    plot_accuracy_vs_gap(
+        gap_list=gaps,
+        scheduler_names=scheds,
+        noise_model=noise,
+        node_path_list=node_paths,
+        importance_list=importances,
+        bounces=(1,2,3,4),
+        repeat=5,
+        importance_mode="fixed",
+        seed=42,
+        alpha_base=0.95,
+        variance=0.10,
+        C_total_override=5000,
+    )

+ 339 - 0
new_add_linkselfie/evaluationpair.py

@@ -0,0 +1,339 @@
+# evaluationpair.py — Sweep "number of destination pairs" (x) vs Accuracy (y)
+# Designed to align with evaluation.py pipeline (1-origin keys, utils.ids normalization).
+#
+# Produces: outputs/plot_accuracy_vs_pairs_<noise_model>.pdf
+
+import os
+import time
+import json
+import pickle
+import hashlib
+import shutil
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+from cycler import cycler
+
+from network import QuantumNetwork
+from schedulers import run_scheduler
+from viz.plots import mean_ci95
+
+
+from utils.ids import to_idx0, normalize_to_1origin, is_keys_1origin
+
+from utils.fidelity import (
+    generate_fidelity_list_avg_gap,
+    generate_fidelity_list_fix_gap,
+    generate_fidelity_list_random,
+    _generate_fidelity_list_random_rng,
+)
+
# ---- Matplotlib style (match evaluation.py) ----
# Applied at import time so figures match evaluation.py's look.
mpl.rcParams["figure.constrained_layout.use"] = True
mpl.rcParams["savefig.bbox"] = "tight"
mpl.rcParams["font.family"] = "serif"
# Fallback chain: first available serif font wins.
mpl.rcParams["font.serif"] = [
    "TeX Gyre Termes",
    "Nimbus Roman",
    "Liberation Serif",
    "DejaVu Serif",
]
mpl.rcParams["font.size"] = 20

# One entry per scheduler line; color/marker/linestyle cycle in lockstep (6 each).
_default_cycler = (
    cycler(color=["#4daf4a", "#377eb8", "#e41a1c", "#984ea3", "#ff7f00", "#a65628"])
    + cycler(marker=["s", "v", "o", "x", "*", "+"])
    + cycler(linestyle=[":", "--", "-", "-.", "--", ":"])
)
plt.rc("axes", prop_cycle=_default_cycler)
+
+
+# =========================
+# Utilities
+# =========================
+def _log(msg: str):
+    print(msg, flush=True)
+
+def _generate_fidelity_list_random_rng(rng: np.random.Generator, path_num: int,
+                                       alpha: float = 0.90, beta: float = 0.85, variance: float = 0.1):
+    """Generate `path_num` link fidelities in [0.8, 1.0], ensuring a small top-1 gap."""
+    while True:
+        mean = [alpha] + [beta] * (path_num - 1)
+        res = []
+        for mu in mean:
+            while True:
+                r = rng.normal(mu, variance)
+                if 0.8 <= r <= 1.0:
+                    break
+            res.append(float(r))
+        sorted_res = sorted(res, reverse=True)
+        if sorted_res[0] - sorted_res[1] > 0.02:
+            return res
+
+
+# =========================
+# Pair-sweep cache helpers
+# =========================
+def _sweep_signature_pairs(pairs_list, paths_per_pair, C_total, scheduler_names, noise_model,
+                           bounces, repeat, importance_mode="fixed", importance_uniform=(0.0,1.0), seed=None):
+    payload = {
+        "pairs_list": list(pairs_list),
+        "paths_per_pair": int(paths_per_pair),
+        "C_total": int(C_total),
+        "scheduler_names": list(scheduler_names),
+        "noise_model": str(noise_model),
+        "bounces": list(bounces),
+        "repeat": int(repeat),
+        "importance_mode": str(importance_mode),
+        "importance_uniform": list(importance_uniform) if importance_uniform is not None else None,
+        "seed": int(seed) if seed is not None else None,
+        "version": 2,  # ★ schema: per_pair_details の est/true_fid_by_path を 1-origin へ統一
+    }
+    sig = hashlib.md5(json.dumps(payload, sort_keys=True).encode("utf-8")).hexdigest()[:10]
+    return payload, sig
+
+def _shared_pair_sweep_path(noise_model: str, sig: str):
+    root_dir = os.path.dirname(os.path.abspath(__file__))
+    outdir = os.path.join(root_dir, "outputs")
+    os.makedirs(outdir, exist_ok=True)
+    return os.path.join(outdir, f"pair_sweep_{noise_model}_{sig}.pickle")
+
+
def _run_or_load_pair_sweep(
    pairs_list, paths_per_pair, C_total, scheduler_names, noise_model,
    bounces=(1,2,3,4), repeat=10,
    importance_mode="fixed", importance_uniform=(0.0,1.0),
    seed=None,
    verbose=True, print_every=1,
):
    """
    Run (or load from cache) the pair-count sweep: for each N in `pairs_list`,
    build a topology of N destination pairs (each with `paths_per_pair` links),
    evaluate every scheduler `repeat` times, and cache the whole result.

    Single-producer file lock: one process creates `<cache>.lock` atomically
    and produces the pickle; others wait, honoring a stale-lock timeout and
    the producer's mtime heartbeat.
    """
    config, sig = _sweep_signature_pairs(
        pairs_list, paths_per_pair, C_total, scheduler_names, noise_model,
        bounces, repeat, importance_mode=importance_mode, importance_uniform=importance_uniform, seed=seed
    )
    cache_path = _shared_pair_sweep_path(noise_model, sig)
    lock_path = cache_path + ".lock"
    STALE_LOCK_SECS = 6 * 60 * 60   # consider the lock dead after 6 hours
    HEARTBEAT_EVERY = 5.0           # seconds between lock mtime refreshes

    rng = np.random.default_rng(seed)

    # Quick load if exists
    if os.path.exists(cache_path):
        if verbose: _log(f"[pair-sweep] Load cached: {os.path.basename(cache_path)}")
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    # Acquire lock (single producer; others wait)
    got_lock = False
    while True:
        try:
            # O_EXCL creation is atomic: exactly one process wins.
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            got_lock = True
            break
        except FileExistsError:
            # Another process may have finished while we waited.
            if os.path.exists(cache_path):
                with open(cache_path, "rb") as f:
                    return pickle.load(f)
            try:
                age = time.time() - os.path.getmtime(lock_path)
            except OSError:
                age = 0
            if age > STALE_LOCK_SECS:
                if verbose: _log("[pair-sweep] Stale lock detected. Removing...")
                try: os.remove(lock_path)
                except FileNotFoundError: pass
                continue
            if verbose: _log("[pair-sweep] Waiting for cache to be ready...")
            time.sleep(1.0)

    try:
        if verbose: _log(f"[pair-sweep] Run sweep and cache to: {os.path.basename(cache_path)}")

        # data[scheduler][pairs_index] -> list of per-repeat run records
        data = {name: {k: [] for k in range(len(pairs_list))} for name in scheduler_names}
        last_hb = time.time()

        for r in range(repeat):
            if verbose and ((r + 1) % print_every == 0 or r == 0):
                _log(f"[pair-sweep] Repeat {r+1}/{repeat}")

            # For each N (number of destination pairs), build one fixed topology per repeat
            for k, N_pairs in enumerate(pairs_list):
                # Heartbeat: refresh lock mtime so waiters don't declare it stale.
                now = time.time()
                if now - last_hb >= HEARTBEAT_EVERY:
                    try: os.utime(lock_path, None)
                    except FileNotFoundError: pass
                    last_hb = now

                node_path_list = [int(paths_per_pair)] * int(N_pairs)

                # Fidelity bank for this N (used consistently across schedulers)
                fidelity_bank = [_generate_fidelity_list_random_rng(rng, paths_per_pair) for _ in node_path_list]

                # Importance list for this N
                if str(importance_mode).lower() == "uniform":
                    a, b = map(float, importance_uniform)
                    importance_list = [float(rng.uniform(a, b)) for _ in node_path_list]
                else:
                    # fixed mode: default all ones
                    importance_list = [1.0 for _ in node_path_list]

                # Closure over fidelity_bank: every scheduler sees identical links.
                def network_generator(path_num, pair_idx):
                    return QuantumNetwork(path_num, fidelity_bank[pair_idx], noise_model)

                for name in scheduler_names:
                    per_pair_results, total_cost, per_pair_details = run_scheduler(
                        node_path_list=node_path_list,
                        importance_list=importance_list,
                        scheduler_name=name,
                        bounces=list(bounces),
                        C_total=int(C_total),
                        network_generator=network_generator,
                        return_details=True,
                    )

                    # As in evaluation.py: inject the true-fidelity map (keys 1..L)
                    # and normalize the estimated map to 1-origin keys.
                    for d, det in enumerate(per_pair_details):
                        L = node_path_list[d]
                        est_map = det.get("est_fid_by_path", {})
                        if est_map:
                            est_map_norm = normalize_to_1origin({int(k): float(v) for k, v in est_map.items()}, L)
                        else:
                            est_map_norm = {}
                        true_map = {pid: float(fidelity_bank[d][to_idx0(pid)]) for pid in range(1, L + 1)}
                        # Fail fast if normalization did not yield 1..L keys.
                        if est_map_norm and not is_keys_1origin(est_map_norm.keys(), L):
                            raise RuntimeError(f"[inject] est_fid_by_path keys not 1..{L} (pair={d})")
                        det["est_fid_by_path"]  = est_map_norm
                        det["true_fid_by_path"] = true_map

                    data[name][k].append({
                        "per_pair_results": per_pair_results,
                        "per_pair_details": per_pair_details,
                        "total_cost": total_cost,
                        "importance_list": importance_list,
                        "node_path_list": node_path_list,
                    })

        payload = {"config": config, "pairs_list": list(pairs_list), "data": data}

        # Atomic publish: write temp file, then rename over the cache path.
        tmp = cache_path + ".tmp"
        with open(tmp, "wb") as f:
            pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
        os.replace(tmp, cache_path)

        return payload

    finally:
        # Always release the lock so other processes are not blocked.
        if got_lock:
            try: os.remove(lock_path)
            except FileNotFoundError: pass
+
+
+# =========================
+# Plot: Accuracy (mean ± 95% CI) vs #Destination Pairs
+# =========================
def plot_accuracy_vs_pairs(
    pairs_list, paths_per_pair, C_total, scheduler_names, noise_model,
    bounces=(1,2,3,4), repeat=10,
    importance_mode="fixed", importance_uniform=(0.0,1.0),
    seed=None,
    verbose=True, print_every=1,
):
    """
    Plot mean accuracy (± 95% CI) versus the number of destination pairs.

    pairs_list: list[int]     # x-axis = number of destination pairs (N)
    paths_per_pair: int       # number of candidate links per pair (each L_n = paths_per_pair)
    C_total: int              # total budget for the whole experiment (fixed while N varies)
    scheduler_names: list[str]
    noise_model: str
    bounces: tuple/list[int]  # NB bounce vector
    repeat: int               # repeats per N
    importance_mode: "fixed" or "uniform"
    importance_uniform: (a,b) # when uniform, sample I_n ~ U[a,b]
    seed: int

    Returns a dict with the PDF path, the pairs list, and the sweep config.
    """
    file_name = f"plot_accuracy_vs_pairs_{noise_model}"
    root_dir = os.path.dirname(os.path.abspath(__file__))
    outdir = os.path.join(root_dir, "outputs")
    os.makedirs(outdir, exist_ok=True)

    # Run the sweep once (or load it from the shared pickle cache).
    payload = _run_or_load_pair_sweep(
        pairs_list, paths_per_pair, C_total, scheduler_names, noise_model,
        bounces=bounces, repeat=repeat,
        importance_mode=importance_mode, importance_uniform=importance_uniform,
        seed=seed, verbose=verbose, print_every=print_every
    )

    results = {name: {"accs": [[] for _ in pairs_list]} for name in scheduler_names}

    for name in scheduler_names:
        for k in range(len(pairs_list)):
            for run in payload["data"][name][k]:
                per_pair_results = run["per_pair_results"]

                # Normalize elements to bool → 0/1
                vals = []
                for r in per_pair_results:
                    if isinstance(r, tuple):
                        c = r[0]
                    elif isinstance(r, (int, float, bool)):
                        c = bool(r)
                    else:
                        raise TypeError(f"Unexpected per_pair_results element: {type(r)} -> {r}")
                    vals.append(1.0 if c else 0.0)

                acc = float(np.mean(vals)) if vals else 0.0
                results[name]["accs"][k].append(acc)

    # Plot
    plt.rc("axes", prop_cycle=_default_cycler)
    fig, ax = plt.subplots(figsize=(8, 5), constrained_layout=True)
    xs = list(pairs_list)

    for name, data in results.items():
        means, halfs = [], []
        for vals in data["accs"]:
            # mean and 95% CI half-width over repeats at this N
            m, h = mean_ci95(vals)
            means.append(m); halfs.append(h)
        means = np.asarray(means); halfs = np.asarray(halfs)

        label = name.replace("Vanilla NB","VanillaNB").replace("Succ. Elim. NB","SuccElimNB")
        ax.plot(xs, means, linewidth=2.0, label=label)
        ax.fill_between(xs, means - halfs, means + halfs, alpha=0.25)

    ax.set_xlabel("Number of Destination Pairs (N)")
    ax.set_ylabel("Average Correctness (mean ± 95% CI)")
    ax.grid(True); ax.legend(title="Scheduler", fontsize=14, title_fontsize=18)

    pdf = os.path.join(outdir, f"{file_name}.pdf")
    plt.savefig(pdf)
    # Optional margin trim via external pdfcrop.
    if shutil.which("pdfcrop"):
        os.system(f'pdfcrop --margins "8 8 8 8" "{pdf}" "{pdf}"')
    _log(f"Saved: {pdf}")

    return {
        "pdf": pdf,
        "pairs_list": list(pairs_list),
        "config": payload["config"],
    }
+
+
+if __name__ == "__main__":
+    # Minimal CLI for quick testing
+    pairs_list = [1, 2, 3, 4, 5, 6]
+    paths_per_pair = 5
+    C_total = 6000
+    scheduler_names = ["Greedy", "LNaive"]
+    noise_model = "Depolar"
+    bounces = (1,2,3,4)
+    repeat = 10
+    importance_mode = "uniform"
+    importance_uniform = (0.0, 1.0)
+    seed = 12
+
+    plot_accuracy_vs_pairs(
+        pairs_list, paths_per_pair, C_total, scheduler_names, noise_model,
+        bounces=bounces, repeat=repeat,
+        importance_mode=importance_mode, importance_uniform=importance_uniform,
+        seed=seed, verbose=True
+    )

+ 20 - 0
new_add_linkselfie/groups.org

@@ -0,0 +1,20 @@
+LNaiveを以下のように改良したい
+
+スケジューラに与えられた重要度によって宛先のパス数を仮想的にまとめる
+
+ 0.75 < 重要度 <= 1
+ 宛先のリンク数はまとめない
+ 0.50 < 重要度 <= 0.75
+ 2本のリンクを1本にまとめる
+ 0.25 < 重要度 <= 0.50
+ 3本のリンクを1本にまとめる
+ 0.00 < 重要度 <= 0.25
+ 全てのリンクを1本にまとめる
+
+リンクをまとめるとは
+
+path_num[1]とpath_num[2]がまとめられていると仮定する
+path_num[1,2]を1つのリンクとしてみなすということ
+path_num[1,2]にNs = 1があたえられるとpath_num[1]とpath_num[2]のどちら
+かがランダムに選出され、そのリンクで測定をする。その結果をpath_num[1]
+とpath_num[2]の両方が同じ結果として登録する

+ 176 - 0
new_add_linkselfie/main.py

@@ -0,0 +1,176 @@
+# main.py — Run experiments: budget, random-gap, fixed-gap, and #pairs
+import os
+import random
+import numpy as np
+
+from evaluation import plot_accuracy_vs_budget, plot_value_vs_used, plot_value_vs_budget
+from evaluationgap import plot_accuracy_vs_gap, plot_accuracy_vs_gap_fixgap
+from evaluationpair import plot_accuracy_vs_pairs
+
+# =====================
+# Simple configuration
+# =====================
+# Toggle which experiments to run
+RUN_BUDGET     = True
+RUN_GAP_RANDOM = False   
+RUN_GAP_FIX    = False
+RUN_PAIRS      = False
+
+# Global seed + common settings
+SEED        = 13
+NOISE_MODEL = "Depolar"
+BOUNCES     = (1, 2, 3, 4)
+REPEAT      = 5
+SCHEDULERS  = ["LNaive","Groups","Greedy","WNaive"]
+
+# Importance settings
+# NOTE: when IMPORTANCE_MODE == "uniform", the *_IMPORTANCES lists below are
+# ignored and importances are re-sampled from U[a, b] on every repeat.
+IMPORTANCE_MODE    = "fixed"          # "fixed" or "uniform"
+IMPORTANCE_UNIFORM = (0.0, 1.0)         # used only if IMPORTANCE_MODE == "uniform"
+
+# -----------------
+# 1) Budget sweep
+# -----------------
+BUDGET_LIST         = [50,100,200,500,1000]
+BUDGET_NODE_PATHS   = [5,5,5,5]
+BUDGET_IMPORTANCES  = [0.2,0.4,0.6,0.8]   # budget sweep only: used only when IMPORTANCE_MODE == "fixed"
+
+# --------------
+# 2) Gap sweeps
+# --------------
+# (a) Random (alpha - beta = gap) version
+GAP_LIST_RANDOM        = [0.025, 0.05, 0.075, 0.10, 0.125, 0.150]
+ALPHA_BASE             = 0.95
+VARIANCE               = 0.025
+C_GAP_TOTAL            = 10000           # total budget per gap point
+GAP_RANDOM_NODE_PATHS  = [4,4,4,4,4]  
+GAP_RANDOM_IMPORTANCES = [0.3, 0.6, 0.9, 0.6, 0.3]  # used only in "fixed" mode (length must match the number of pairs)
+
+# (b) Fixed arithmetic-sequence version
+GAP_LIST_FIX        = [0.01, 0.02,0.05,0.1]
+FIDELITY_MAX        = 1.0              # sequence starts at this max and steps down by 'gap'
+GAP_FIX_NODE_PATHS  = [4,4,4,4]  
+GAP_FIX_IMPORTANCES = [0.3, 0.6, 0.9, 0.3]    # used only in "fixed" mode
+
+# --------------------
+# 3) #Pairs (N) sweep
+# --------------------
+PAIRS_LIST      = [3, 4, 5, 6, 7, 8]   # number of destination pairs
+PATHS_PER_PAIR  = 8                    # candidate links per pair
+C_PAIRS_TOTAL   = 10000                # total budget per N
+
+def set_random_seed(seed: int = 12):
+    """Seed every RNG the experiments may touch (stdlib, NumPy, NetSquid).
+
+    The try/except wrappers are deliberate best-effort guards: NumPy and
+    NetSquid may be absent or fail to seed in some environments, and a
+    missing optional dependency should not abort the run.
+    """
+    random.seed(seed)
+    try:
+        np.random.seed(seed)
+    except Exception:
+        pass
+    try:
+        # NetSquid is optional; seed its simulator RNG only when installed.
+        import netsquid as ns
+        ns.set_random_state(seed)
+    except Exception:
+        pass
+
+def main():
+    """Run the experiments enabled by the RUN_* flags in the config above.
+
+    Each plotting helper simulates the schedulers listed in SCHEDULERS and
+    writes its figure(s) under outputs/.
+    """
+    set_random_seed(SEED)
+    os.makedirs("outputs", exist_ok=True)
+
+    # (1) Budget vs Accuracy
+    if RUN_BUDGET:
+        plot_accuracy_vs_budget(
+            budget_list=BUDGET_LIST,
+            scheduler_names=SCHEDULERS,
+            noise_model=NOISE_MODEL,
+            node_path_list=BUDGET_NODE_PATHS,
+            importance_list=BUDGET_IMPORTANCES,
+            bounces=BOUNCES,
+            repeat=REPEAT,
+            importance_mode=IMPORTANCE_MODE,
+            importance_uniform=IMPORTANCE_UNIFORM,
+            seed=SEED,
+            verbose=True,
+        )
+        # Value-function plots (comment out if not needed)
+        plot_value_vs_used(
+            budget_list=BUDGET_LIST,
+            scheduler_names=SCHEDULERS,
+            noise_model=NOISE_MODEL,
+            node_path_list=BUDGET_NODE_PATHS,
+            importance_list=BUDGET_IMPORTANCES,
+            bounces=BOUNCES,
+            repeat=REPEAT,
+            importance_mode=IMPORTANCE_MODE,
+            importance_uniform=IMPORTANCE_UNIFORM,
+            seed=SEED,
+            verbose=True,
+        )
+
+        plot_value_vs_budget(
+            budget_list=BUDGET_LIST,
+            scheduler_names=SCHEDULERS,
+            noise_model=NOISE_MODEL,
+            node_path_list=BUDGET_NODE_PATHS,
+            importance_list=BUDGET_IMPORTANCES,
+            bounces=BOUNCES,
+            repeat=REPEAT,
+            importance_mode=IMPORTANCE_MODE,
+            importance_uniform=IMPORTANCE_UNIFORM,
+            seed=SEED,
+            verbose=True,
+        )
+
+    # (2a) Gap vs Accuracy (randomized: alpha - beta = gap)
+    if RUN_GAP_RANDOM:
+        plot_accuracy_vs_gap(
+            gap_list=GAP_LIST_RANDOM,
+            scheduler_names=SCHEDULERS,
+            noise_model=NOISE_MODEL,
+            node_path_list=GAP_RANDOM_NODE_PATHS,
+            importance_list=GAP_RANDOM_IMPORTANCES,
+            bounces=BOUNCES,
+            repeat=REPEAT,
+            importance_mode=IMPORTANCE_MODE,
+            importance_uniform=IMPORTANCE_UNIFORM,
+            seed=SEED,
+            alpha_base=ALPHA_BASE,
+            variance=VARIANCE,
+            C_total_override=C_GAP_TOTAL,
+            verbose=True,
+        )
+
+    # (2b) Gap vs Accuracy (fixed arithmetic sequence)
+    if RUN_GAP_FIX:
+        plot_accuracy_vs_gap_fixgap(
+            gap_list=GAP_LIST_FIX,
+            scheduler_names=SCHEDULERS,
+            noise_model=NOISE_MODEL,
+            node_path_list=GAP_FIX_NODE_PATHS,
+            importance_list=GAP_FIX_IMPORTANCES,
+            bounces=BOUNCES,
+            repeat=REPEAT,
+            importance_mode=IMPORTANCE_MODE,
+            importance_uniform=IMPORTANCE_UNIFORM,
+            seed=SEED,                 # used only if IMPORTANCE_MODE == "uniform"
+            fidelity_max=FIDELITY_MAX, # sequence head value
+            C_total_override=C_GAP_TOTAL,
+            verbose=True,
+        )
+
+    # (3) #Pairs vs Accuracy
+    if RUN_PAIRS:
+        plot_accuracy_vs_pairs(
+            pairs_list=PAIRS_LIST,
+            paths_per_pair=PATHS_PER_PAIR,
+            C_total=C_PAIRS_TOTAL,
+            scheduler_names=SCHEDULERS,
+            noise_model=NOISE_MODEL,
+            bounces=BOUNCES,
+            repeat=REPEAT,
+            importance_mode=IMPORTANCE_MODE,
+            importance_uniform=IMPORTANCE_UNIFORM,
+            seed=SEED,
+            verbose=True,
+        )
+
+if __name__ == "__main__":
+    main()

BIN
new_add_linkselfie/metrics/__pycache__/widths.cpython-38.pyc


+ 0 - 0
add_linkselfie/metrics/widths.py → new_add_linkselfie/metrics/widths.py


+ 0 - 0
add_linkselfie/metrics/widths.py~ → new_add_linkselfie/metrics/widths.py~


+ 0 - 0
add_linkselfie/nb_protocol.py → new_add_linkselfie/nb_protocol.py


+ 0 - 0
add_linkselfie/network.py → new_add_linkselfie/network.py


BIN
new_add_linkselfie/outputs/plot_accuracy_vs_budget_Depolar.pdf


BIN
new_add_linkselfie/outputs/plot_value_vs_budget_Depolar.pdf


Kaikkia tiedostoja ei voida näyttää, sillä liian monta tiedostoa muuttui tässä diffissä