---
license: apache-2.0
task_categories:
- question-answering
language:
- en
tags:
- remote_sensing
- vlm
size_categories:
- 10K<n<100K
---

<!-- NOTE(review): a span of this card was lost to angle-bracket stripping —
     everything between the "<" in "10K<n<100K" and the next ">" (a "> 0"
     comparison inside the demo script) is missing. The lost span covered the
     card's intro text and the start of the demo script: the
     run_single_inference(args) definition, model/tokenizer loading, image
     preprocessing, and the top of the generation_config dict. The visible
     remainder is preserved below; the "do_sample": args.temperature > 0 line
     is a minimal reconstruction of the strip boundary — confirm against the
     original script. -->

```python
# ... beginning of file truncated: imports, image preprocessing,
#     model/tokenizer loading, and the start of run_single_inference(args) ...

    generation_config = {
        # reconstruction of the stripped boundary; only "0," survived the garbling
        "do_sample": args.temperature > 0,
        "temperature": args.temperature,
    }

    answer = model.chat(
        tokenizer=tokenizer,
        pixel_values=pixel_values,
        question=args.question,
        generation_config=generation_config,
        verbose=True
    )

    print("\n=== Inference Result ===")
    print(f"Question: {args.question}")
    print(f"Answer: {answer}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str, required=True, help='Model repo ID on Hugging Face Hub')
    parser.add_argument('--image-path', type=str, required=True, help='Path to local input image')
    parser.add_argument('--question', type=str, required=True, help='Question to ask about the image')
    parser.add_argument('--num-beams', type=int, default=5)
    parser.add_argument('--temperature', type=float, default=0.0)
    parser.add_argument('--load-in-8bit', action='store_true')
    parser.add_argument('--load-in-4bit', action='store_true')
    parser.add_argument('--auto', action='store_true')
    args = parser.parse_args()
    run_single_inference(args)
```

Example usage:

```bash
python demo_infer.py \
  --checkpoint akshaydudhane/EarthDial_4B_RGB \
  --image-path ./test.jpg \
  --question "Which road has more vehicles?" \
  --auto
```