diff --git a/config.toml b/config.toml
new file mode 100644
index 0000000000..0bca49ba49
--- /dev/null
+++ b/config.toml
@@ -0,0 +1,36 @@
+# config.toml
+# Unified configuration file for the gpt-engineer project
+
+# API Configuration
+[API]
+# API key for OpenAI
+# Get your personal key from https://platform.openai.com/account/api-keys
+OPENAI_API_KEY = "your_api_key_here"
+ANTHROPIC_API_KEY = "your_anthropic_api_key_here"
+
+# Model configurations
+[model]
+model_name = "gpt-4o"
+# Controls randomness: lower values for more focused, deterministic outputs
+temperature = 0.1
+# Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com).
+# If set, the model name above is the deployment name chosen in the Azure AI Studio.
+azure_endpoint = ""
+
+# Improve mode configuration
+[improve]
+# Linting with Black (Python) enhances code suggestions from LLMs.
+# Enable or disable linting (true/false)
+is_linting = false
+# Enable or disable file selection. If true, your default editor opens to select files. (true/false)
+is_file_selection = true
+
+# Git Filter Configuration
+[git_filter]
+# File extension settings for the git filter
+file_extensions = ["py", "toml", "md"]
+
+# Self-Healing Mechanism Configuration
+[self_healing]
+# Number of retry attempts for self-healing mechanisms (0-2)
+retry_attempts = 1
diff --git a/gpt_engineer/applications/cli/file_selector.py b/gpt_engineer/applications/cli/file_selector.py
index e64764059e..68414764f9 100644
--- a/gpt_engineer/applications/cli/file_selector.py
+++ b/gpt_engineer/applications/cli/file_selector.py
@@ -53,15 +53,11 @@ class FileSelector:
     IGNORE_FOLDERS = {"site-packages", "node_modules", "venv", "__pycache__"}
     FILE_LIST_NAME = "file_selection.toml"
     COMMENT = (
-        "# Remove '#' to select a file or turn off linting.\n\n"
-        "# Linting with BLACK (Python) enhances code suggestions from LLMs. "
-        "To disable linting, uncomment the relevant option in the linting settings.\n\n"
+        "# Remove '#' to select a file\n\n"
         "# gpt-engineer can only read selected files. "
         "Including irrelevant files will degrade performance, "
         "cost additional tokens and potentially overflow token limit.\n\n"
     )
-    LINTING_STRING = '[linting]\n# "linting" = "off"\n\n'
-    is_linting = True
 
     def __init__(self, project_path: Union[str, Path]):
         """
@@ -118,7 +114,8 @@ def ask_for_files(self, skip_file_selection=False) -> tuple[FilesDict, bool]:
             except UnicodeDecodeError:
                 print(f"Warning: File not UTF-8 encoded {file_path}, skipping")
 
-        return FilesDict(content_dict), self.is_linting
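+        # Linting is now configured via config.toml, so only the selected files are returned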
+        return FilesDict(content_dict)
 
     def editor_file_selector(
         self, input_path: Union[str, Path], init: bool = True
@@ -160,7 +156,6 @@ def editor_file_selector(
             # Write to the toml file
             with open(toml_file, "w") as f:
                 f.write(self.COMMENT)
-                f.write(self.LINTING_STRING)
                 f.write(s)
 
         else:
@@ -168,17 +163,6 @@ def editor_file_selector(
             all_files = self.get_current_files(root_path)
             s = toml.dumps({"files": {x: "selected" for x in all_files}})
 
-            # get linting status from the toml file
-            with open(toml_file, "r") as file:
-                linting_status = toml.load(file)
-            if (
-                "linting" in linting_status
-                and linting_status["linting"].get("linting", "").lower() == "off"
-            ):
-                self.is_linting = False
-                self.LINTING_STRING = '[linting]\n"linting" = "off"\n\n'
-                print("\nLinting is disabled")
-
             with open(toml_file, "r") as file:
                 selected_files = toml.load(file)
 
@@ -196,7 +180,6 @@ def editor_file_selector(
             # Write the merged list back to the .toml for user review and modification
             with open(toml_file, "w") as file:
                 file.write(self.COMMENT)  # Ensure to write the comment
-                file.write(self.LINTING_STRING)
                 file.write(s)
 
         print(
@@ -294,16 +277,6 @@ def get_files_from_toml(
         selected_files = []
         edited_tree = toml.load(toml_file)  # Load the edited .toml file
 
-        # check if users have disabled linting or not
-        if (
-            "linting" in edited_tree
-            and edited_tree["linting"].get("linting", "").lower() == "off"
-        ):
-            self.is_linting = False
-            print("\nLinting is disabled")
-        else:
-            self.is_linting = True
-
         # Iterate through the files in the .toml and append selected files to the list
         for file, _ in edited_tree["files"].items():
             selected_files.append(file)
diff --git a/gpt_engineer/applications/cli/main.py b/gpt_engineer/applications/cli/main.py
index 5a0c4135b7..7931c94cd1 100644
--- a/gpt_engineer/applications/cli/main.py
+++ b/gpt_engineer/applications/cli/main.py
@@ -34,6 +34,7 @@
 import sys
 
 from pathlib import Path
+from typing import Optional
 
 import openai
 import typer
@@ -60,6 +61,7 @@
 from gpt_engineer.core.files_dict import FilesDict
 from gpt_engineer.core.git import stage_uncommitted_to_git
 from gpt_engineer.core.preprompts_holder import PrepromptsHolder
+from gpt_engineer.core.project_config import Config
 from gpt_engineer.core.prompt import Prompt
 from gpt_engineer.tools.custom_steps import clarified_gen, lite_gen, self_heal
 
@@ -280,80 +282,72 @@ def format_installed_packages(packages):
 )
 def main(
     project_path: str = typer.Argument(".", help="path"),
-    model: str = typer.Option(
-        os.environ.get("MODEL_NAME", "gpt-4o"), "--model", "-m", help="model id string"
-    ),
-    temperature: float = typer.Option(
-        0.1,
+    model: Optional[str] = typer.Option(None, "--model", "-m", help="model id string"),
+    temperature: Optional[float] = typer.Option(
+        None,
         "--temperature",
         "-t",
         help="Controls randomness: lower values for more focused, deterministic outputs",
     ),
-    improve_mode: bool = typer.Option(
-        False,
+    improve_mode: Optional[bool] = typer.Option(
+        None,
         "--improve",
         "-i",
         help="Improve an existing project by modifying the files.",
     ),
-    lite_mode: bool = typer.Option(
-        False,
+    lite_mode: Optional[bool] = typer.Option(
+        None,
         "--lite",
         "-l",
         help="Lite mode: run a generation using only the main prompt.",
     ),
-    clarify_mode: bool = typer.Option(
-        False,
+    clarify_mode: Optional[bool] = typer.Option(
+        None,
         "--clarify",
         "-c",
         help="Clarify mode - discuss specification with AI before implementation.",
     ),
-    self_heal_mode: bool = typer.Option(
-        False,
+    self_heal_mode: Optional[bool] = typer.Option(
+        None,
         "--self-heal",
         "-sh",
         help="Self-heal mode - fix the code by itself when it fails.",
     ),
-    azure_endpoint: str = typer.Option(
-        "",
+    azure_endpoint: Optional[str] = typer.Option(
+        None,
         "--azure",
         "-a",
-        help="""Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com).
-            In that case, the given model is the deployment name chosen in the Azure AI Studio.""",
+        help="Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com). In that case, the given model is the deployment name chosen in the Azure AI Studio.",
     ),
-    use_custom_preprompts: bool = typer.Option(
-        False,
+    use_custom_preprompts: Optional[bool] = typer.Option(
+        None,
         "--use-custom-preprompts",
-        help="""Use your project's custom preprompts instead of the default ones.
-          Copies all original preprompts to the project's workspace if they don't exist there.""",
+        help="Use your project's custom preprompts instead of the default ones. Copies all original preprompts to the project's workspace if they don't exist there.",
     ),
-    llm_via_clipboard: bool = typer.Option(
-        False,
+    llm_via_clipboard: Optional[bool] = typer.Option(
+        None,
         "--llm-via-clipboard",
         help="Use the clipboard to communicate with the AI.",
     ),
-    verbose: bool = typer.Option(
-        False, "--verbose", "-v", help="Enable verbose logging for debugging."
+    verbose: Optional[bool] = typer.Option(
+        None, "--verbose", "-v", help="Enable verbose logging for debugging."
     ),
-    debug: bool = typer.Option(
-        False, "--debug", "-d", help="Enable debug mode for debugging."
+    debug: Optional[bool] = typer.Option(
+        None, "--debug", "-d", help="Enable debug mode for debugging."
     ),
-    prompt_file: str = typer.Option(
-        "prompt",
-        "--prompt_file",
-        help="Relative path to a text file containing a prompt.",
+    prompt_file: Optional[str] = typer.Option(
+        None, "--prompt_file", help="Relative path to a text file containing a prompt."
     ),
-    entrypoint_prompt_file: str = typer.Option(
-        "",
+    entrypoint_prompt_file: Optional[str] = typer.Option(
+        None,
         "--entrypoint_prompt",
-        help="Relative path to a text file containing a file that specifies requirements for you entrypoint.",
+        help="Relative path to a text file containing a file that specifies requirements for your entrypoint.",
     ),
-    image_directory: str = typer.Option(
-        "",
-        "--image_directory",
-        help="Relative path to a folder containing images.",
+    image_directory: Optional[str] = typer.Option(
+        None, "--image_directory", help="Relative path to a folder containing images."
     ),
-    use_cache: bool = typer.Option(
-        False,
+    use_cache: Optional[bool] = typer.Option(
+        None,
         "--use_cache",
         help="Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.",
     ),
@@ -366,7 +360,7 @@ def main(
     no_execution: bool = typer.Option(
         False,
         "--no_execution",
-        help="Run setup but to not call LLM or write any code. For testing purposes.",
+        help="Run setup but do not call LLM or write any code. For testing purposes.",
     ),
     sysinfo: bool = typer.Option(
         False,
@@ -428,6 +422,56 @@ def main(
     None
     """
 
+    # Point the user at the configuration file
+    print(
+        "The configuration file (config.toml) is located in the root directory. "
+        "You can edit it with your preferred text editor."
+    )
+    # todo: interface to edit the configuration
+
+    # read the configuration file from the root directory
+    config = Config.from_toml(Path(os.getcwd()) / "config.toml")
+    config_dict = config.to_dict()
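+    # to_dict() exposes the parsed config as plain section dicts,
+    # keyed by the table names in config.toml ("API", "model", "improve", ...)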
+
+    # Apply the configuration: CLI options take precedence over config.toml values
+
+    model = model or config_dict["model"]["model_name"]
+    temperature = (
+        temperature if temperature is not None else config_dict["model"]["temperature"]
+    )
+    azure_endpoint = azure_endpoint or config_dict["model"]["azure_endpoint"]
+
+    # Mode flags have no direct counterpart in config.toml; default to off when not set.
+    # The [improve] section's is_linting flag is applied below, in improve mode.
+    improve_mode = improve_mode if improve_mode is not None else False
+    lite_mode = lite_mode if lite_mode is not None else False
+
+    # Self-healing mechanism configuration: a positive retry count enables self-heal
+    self_heal_mode = (
+        self_heal_mode
+        if self_heal_mode is not None
+        else config_dict["self_healing"]["retry_attempts"] > 0
+    )
+
+    # Git filter configuration
+    config_dict["git_filter"]["file_extensions"]  # Assuming this is needed somewhere
+
+    # API keys
+    config_dict["API"]["OPENAI_API_KEY"]
+    config_dict["API"]["ANTHROPIC_API_KEY"]
+
+    # Default values for optional parameters
+    clarify_mode = clarify_mode or False
+    use_custom_preprompts = use_custom_preprompts or False
+    llm_via_clipboard = llm_via_clipboard or False
+    verbose = verbose or False
+    debug = debug or False
+    prompt_file = prompt_file or "prompt"
+    entrypoint_prompt_file = entrypoint_prompt_file or ""
+    image_directory = image_directory or ""
+    use_cache = use_cache or False
+
     if debug:
         import pdb
 
@@ -517,9 +568,10 @@ def main(
-            files_dict_before, is_linting = FileSelector(project_path).ask_for_files(
-                skip_file_selection=skip_file_selection
-            )
+            files_dict_before = FileSelector(project_path).ask_for_files(
+                skip_file_selection=skip_file_selection
+            )
 
             # lint the code
-            if is_linting:
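+            # Linting is now driven by the [improve] section of config.toml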
+            if config_dict["improve"]["is_linting"]:
                 files_dict_before = files.linting(files_dict_before)
 
             files_dict = handle_improve_mode(
diff --git a/gpt_engineer/core/project_config.py b/gpt_engineer/core/project_config.py
index 137a5558c8..4488d6503c 100644
--- a/gpt_engineer/core/project_config.py
+++ b/gpt_engineer/core/project_config.py
@@ -1,81 +1,62 @@
-"""
-Functions for reading and writing the `gpt-engineer.toml` configuration file.
-
-The `gpt-engineer.toml` file is a TOML file that contains project-specific configuration used by the GPT Engineer CLI and gptengineer.app.
-"""
-from dataclasses import asdict, dataclass, field
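+"""
+Functions for reading and writing the `config.toml` configuration file.
+
+The `config.toml` file is a TOML file that contains project-level configuration used by the gpt-engineer CLI.
+"""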
+from dataclasses import dataclass, field
 from pathlib import Path
+from typing import Any, Dict
 
 import tomlkit
 
-default_config_filename = "gpt-engineer.toml"
+default_config_filename = "config.toml"
 
 example_config = """
-[run]
-build = "npm run build"
-test = "npm run test"
-lint = "quick-lint-js"
-
-[paths]
-base = "./frontend"  # base directory to operate in (for monorepos)
-src = "./src"        # source directory (under the base directory) from which context will be retrieved
-
-[gptengineer-app]  # this namespace is used for gptengineer.app, may be used for internal experiments
-project_id = "..."
-
-# we support multiple OpenAPI schemas, used as context for the LLM
-openapi = [
-    { url = "https://api.gptengineer.app/openapi.json" },
-    { url = "https://some-color-translating-api/openapi.json" },
-]
+# API Configuration
+[API]
+OPENAI_API_KEY = "..."
+ANTHROPIC_API_KEY = "..."
+
+# Model configurations
+[model]
+model_name = "gpt-4o"
+temperature = 0.1
+azure_endpoint = ""
+
+# Improve mode configuration
+[improve]
+is_linting = false
+is_file_selection = true
+
+# Git Filter Configuration
+[git_filter]
+file_extensions = ["py", "toml", "md"]
+
+# Self-Healing Mechanism Configuration
+[self_healing]
+retry_attempts = 1
 """
 
 
-@dataclass
-class _PathsConfig:
-    base: str | None = None
-    src: str | None = None
-
-
-@dataclass
-class _RunConfig:
-    build: str | None = None
-    test: str | None = None
-    lint: str | None = None
-    format: str | None = None
-
-
-@dataclass
-class _OpenApiConfig:
-    url: str
-
-
-@dataclass
-class _GptEngineerAppConfig:
-    project_id: str
-    openapi: list[_OpenApiConfig] | None = None
-
-
-def filter_none(d: dict) -> dict:
-    # Drop None values and empty dictionaries from a dictionary
-    return {
-        k: v
-        for k, v in (
-            (k, filter_none(v) if isinstance(v, dict) else v)
-            for k, v in d.items()
-            if v is not None
-        )
-        if not (isinstance(v, dict) and not v)  # Check for non-empty after filtering
-    }
-
-
 @dataclass
 class Config:
-    """Configuration for the GPT Engineer CLI and gptengineer.app via `gpt-engineer.toml`."""
+    """Configuration for the GPT Engineer project"""
 
-    paths: _PathsConfig = field(default_factory=_PathsConfig)
-    run: _RunConfig = field(default_factory=_RunConfig)
-    gptengineer_app: _GptEngineerAppConfig | None = None
+    api_config: Dict[str, Any] = field(default_factory=dict)
+    model_config: Dict[str, Any] = field(default_factory=dict)
+    improve_config: Dict[str, Any] = field(default_factory=dict)
+    git_filter_config: Dict[str, Any] = field(default_factory=dict)
+    self_healing_config: Dict[str, Any] = field(default_factory=dict)
+    other_sections: Dict[str, Any] = field(
+        default_factory=dict
+    )  # To handle any other sections dynamically
 
     @classmethod
     def from_toml(cls, config_file: Path | str):
@@ -86,35 +55,36 @@ def from_toml(cls, config_file: Path | str):
 
     @classmethod
     def from_dict(cls, config_dict: dict):
-        run = _RunConfig(**config_dict.get("run", {}))
-        paths = _PathsConfig(**config_dict.get("paths", {}))
-
-        # load optional gptengineer-app section
-        gptengineer_app_dict = config_dict.get("gptengineer-app", {})
-        gptengineer_app = None
-        if gptengineer_app_dict:
-            assert (
-                "project_id" in gptengineer_app_dict
-            ), "project_id is required in gptengineer-app section"
-            gptengineer_app = _GptEngineerAppConfig(
-                # required if gptengineer-app section is present
-                project_id=gptengineer_app_dict["project_id"],
-                openapi=[
-                    _OpenApiConfig(**openapi)
-                    for openapi in gptengineer_app_dict.get("openapi", [])
-                ]
-                or None,
-            )
-
-        return cls(paths=paths, run=run, gptengineer_app=gptengineer_app)
+        api_config = config_dict.get("API", {})
+        model_config = config_dict.get("model", {})
+        improve_config = config_dict.get("improve", {})
+        git_filter_config = config_dict.get("git_filter", {})
+        self_healing_config = config_dict.get("self_healing", {})
+
+        # Extract other sections not explicitly handled
+        handled_keys = {"API", "model", "improve", "git_filter", "self_healing"}
+        other_sections = {k: v for k, v in config_dict.items() if k not in handled_keys}
+
+        return cls(
+            api_config=api_config,
+            model_config=model_config,
+            improve_config=improve_config,
+            git_filter_config=git_filter_config,
+            self_healing_config=self_healing_config,
+            other_sections=other_sections,
+        )
 
     def to_dict(self) -> dict:
-        d = asdict(self)
-        d["gptengineer-app"] = d.pop("gptengineer_app", None)
+        d = {
+            "API": self.api_config,
+            "model": self.model_config,
+            "improve": self.improve_config,
+            "git_filter": self.git_filter_config,
+            "self_healing": self.self_healing_config,
+        }
+        d.update(self.other_sections)  # Add other dynamic sections
 
         # Drop None values and empty dictionaries
-        # Needed because tomlkit.dumps() doesn't handle None values,
-        # and we don't want to write empty sections.
         d = filter_none(d)
 
         return d
@@ -129,15 +99,15 @@ def to_toml(self, config_file: Path | str, save=True) -> str:
         default_config = Config().to_dict()
         for k, v in self.to_dict().items():
             # only write values that are already explicitly set, or that differ from defaults
-            if k in config or v != default_config[k]:
+            if k in config or v != default_config.get(k):
                 if isinstance(v, dict):
                     config[k] = {
                         k2: v2
                         for k2, v2 in v.items()
                         if (
-                            k2 in config[k]
+                            k2 in config.get(k, {})
                             or default_config.get(k) is None
-                            or v2 != default_config[k].get(k2)
+                            or v2 != default_config.get(k, {}).get(k2)
                         )
                     }
                 else:
@@ -156,3 +126,16 @@ def read_config(config_file: Path) -> tomlkit.TOMLDocument:
     assert config_file.exists(), f"Config file {config_file} does not exist"
     with open(config_file, "r") as f:
         return tomlkit.load(f)
+
+
+def filter_none(d: dict) -> dict:
+    """Drop None values and empty dictionaries from a dictionary"""
+    return {
+        k: v
+        for k, v in (
+            (k, filter_none(v) if isinstance(v, dict) else v)
+            for k, v in d.items()
+            if v is not None
+        )
+        if not (isinstance(v, dict) and not v)  # Check for non-empty after filtering
+    }
diff --git a/tests/test_project_config.py b/tests/test_project_config.py
index 8aab8a2e7e..f5d0182d7b 100644
--- a/tests/test_project_config.py
+++ b/tests/test_project_config.py
@@ -2,49 +2,32 @@
 
 import pytest
 
-from gpt_engineer.core.project_config import (
-    Config,
-    _GptEngineerAppConfig,
-    _OpenApiConfig,
-    example_config,
-    filter_none,
-)
+from gpt_engineer.core.project_config import Config, example_config, filter_none
 
 
 def test_config_load():
-    # write example config to a file
+    # Write example config to a file
     with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
         f.write(example_config)
 
-    # load the config from the file
+    # Load the config from the file
     config = Config.from_toml(f.name)
 
-    assert config.paths.base == "./frontend"
-    assert config.paths.src == "./src"
-    assert config.run.build == "npm run build"
-    assert config.run.test == "npm run test"
-    assert config.run.lint == "quick-lint-js"
-    assert config.gptengineer_app
-    assert config.gptengineer_app.project_id == "..."
-    assert config.gptengineer_app.openapi
-    assert (
-        config.gptengineer_app.openapi[0].url
-        == "https://api.gptengineer.app/openapi.json"
-    )
-    assert (
-        config.gptengineer_app.openapi[1].url
-        == "https://some-color-translating-api/openapi.json"
-    )
+    assert config.api_config["OPENAI_API_KEY"] == "..."
+    assert config.api_config["ANTHROPIC_API_KEY"] == "..."
+    assert config.model_config["model_name"] == "gpt-4o"
+    assert config.model_config["temperature"] == 0.1
+    assert config.improve_config["is_linting"] is False
+    assert config.improve_config["is_file_selection"] is True
     assert config.to_dict()
     assert config.to_toml(f.name, save=False)
 
-    # check that write+read is idempotent
+    # Check that write+read is idempotent
     assert Config.from_toml(f.name) == config
 
 
 def test_config_defaults():
     config = Config()
-    assert config.paths.base is None
     with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
         config.to_toml(f.name)
 
@@ -57,69 +40,47 @@ def test_config_defaults():
 
 
 def test_config_from_dict():
-    d = {"gptengineer-app": {"project_id": "..."}}  # minimal example
+    d = {"improve": {"is_linting": "..."}}  # Minimal example
     config = Config.from_dict(d)
-    assert config.gptengineer_app
-    assert config.gptengineer_app.project_id == "..."
+    assert config.improve_config["is_linting"] == "..."
     config_dict = config.to_dict()
 
-    # check that the config dict matches the input dict exactly (no keys/defaults added)
+    # Check that the config dict matches the input dict exactly (no keys/defaults added)
     assert config_dict == d
 
 
-def test_config_from_dict_with_openapi():
-    # A good test because it has 3 levels of nesting
-    d = {
-        "gptengineer-app": {
-            "project_id": "...",
-            "openapi": [
-                {"url": "https://api.gptengineer.app/openapi.json"},
-            ],
-        }
-    }
-    config = Config.from_dict(d)
-    assert config.gptengineer_app
-    assert config.gptengineer_app.project_id == "..."
-    assert config.gptengineer_app.openapi
-    assert (
-        config.gptengineer_app.openapi[0].url
-        == "https://api.gptengineer.app/openapi.json"
-    )
-
-
 def test_config_load_partial():
-    # Loads a partial config, and checks that the rest is not set (i.e. None)
-    example_config = """
-[gptengineer-app]
-project_id = "..."
+    # Loads a partial config, and checks that the rest is not set (i.e., None)
+    partial_config = """
+[improve]
+is_linting = "..."
 """.strip()
     with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
-        f.write(example_config)
+        f.write(partial_config)
 
     config = Config.from_toml(f.name)
-    assert config.gptengineer_app
-    assert config.gptengineer_app.project_id == "..."
+    assert config.improve_config["is_linting"] == "..."
     assert config.to_dict()
     toml_str = config.to_toml(f.name, save=False)
-    assert toml_str == example_config
+    assert toml_str.strip() == partial_config
 
-    # check that write+read is idempotent
+    # Check that write+read is idempotent
     assert Config.from_toml(f.name) == config
 
 
 def test_config_update():
-    example_config = """
-[gptengineer-app]
-project_id = "..."
+    initial_config = """
+[improve]
+is_linting = "..."
 """.strip()
     with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
-        f.write(example_config)
+        f.write(initial_config)
+
     config = Config.from_toml(f.name)
-    config.gptengineer_app = _GptEngineerAppConfig(
-        project_id="...",
-        openapi=[_OpenApiConfig(url="https://api.gptengineer.app/openapi.json")],
-    )
+    config.improve_config = {"is_linting": False, "is_file_selection": True}
     config.to_toml(f.name)
+
+    # Check that updated values are written and read correctly
     assert Config.from_toml(f.name) == config