Module Discovery
Module discovery enables modules to find each other for cross-module communication. TinySystems uses TinyModule custom resources (CRs) as a distributed service registry: every running module publishes an entry, and every other module watches those entries.
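Each entry carries the module's gRPC address, version, and component list in its status. As a rough sketch (field names are inferred from the examples on this page, not taken from the actual v1alpha1 sources), the status shape looks roughly like:

```go
// Hypothetical shapes, inferred from the addr/version/components fields shown
// on this page. The real v1alpha1 definitions may differ.
package v1alpha1

type TinyModuleStatus struct {
    Addr       string            `json:"addr,omitempty"`
    Version    string            `json:"version,omitempty"`
    Components []ComponentStatus `json:"components,omitempty"`
}

type ComponentStatus struct {
    Name string `json:"name"`
    // Info holds component metadata; its exact shape is not shown on this page.
    Info map[string]string `json:"info,omitempty"`
}
```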
How Discovery Works
```
┌─────────────────────────────────────────────────────────────────────────────┐
│                        MODULE DISCOVERY ARCHITECTURE                         │
└─────────────────────────────────────────────────────────────────────────────┘

  ┌─────────────────┐      ┌─────────────────┐      ┌─────────────────┐
  │  common-module  │      │   http-module   │      │    my-module    │
  │                 │      │                 │      │                 │
  │  Creates:       │      │  Creates:       │      │  Creates:       │
  │  TinyModule CR  │      │  TinyModule CR  │      │  TinyModule CR  │
  └────────┬────────┘      └────────┬────────┘      └────────┬────────┘
           │                        │                        │
           │                        │                        │
           ▼                        ▼                        ▼
┌─────────────────────────────────────────────────────────────────────────────┐
│                            KUBERNETES API SERVER                             │
│                                                                             │
│  ┌─────────────────────────────────────────────────────────────────────┐    │
│  │                           TinyModule CRs                            │    │
│  │                                                                      │    │
│  │  common-module-v1        http-module-v1          my-module-v1       │    │
│  │  ┌──────────────────┐    ┌──────────────────┐    ┌──────────────┐   │    │
│  │  │ status:          │    │ status:          │    │ status:      │   │    │
│  │  │  addr: ":50051"  │    │  addr: ":50052"  │    │  addr: ...   │   │    │
│  │  │  components:     │    │  components:     │    │  components: │   │    │
│  │  │   - router       │    │   - server       │    │   - mycomp   │   │    │
│  │  │   - split        │    │   - client       │    │              │   │    │
│  │  └──────────────────┘    └──────────────────┘    └──────────────┘   │    │
│  └─────────────────────────────────────────────────────────────────────┘    │
└─────────────────────────────────────────────────────────────────────────────┘
           ▲                        ▲                        ▲
           │                        │                        │
           │            Watch all TinyModule CRs             │
           │                        │                        │
  ┌────────┴────────┐      ┌────────┴────────┐      ┌────────┴────────┐
  │  common-module  │      │   http-module   │      │    my-module    │
  │                 │      │                 │      │                 │
  │  ClientPool:    │      │  ClientPool:    │      │  ClientPool:    │
  │  - http-module  │      │  - common-module│      │  - common-module│
  │  - my-module    │      │  - my-module    │      │  - http-module  │
  └─────────────────┘      └─────────────────┘      └─────────────────┘
```

Discovery Flow
```
1. MODULE STARTUP
   ┌────────────────────────────────────────────────────────────────────────┐
   │ Module creates TinyModule CR with its name                             │
   │                                                                        │
   │   resourceManager.CreateModule(ctx, ModuleInfo{                        │
   │     Name:          "github.com/myorg/my-module",                       │
   │     NameSanitised: "my-module-v1",                                     │
   │     Version:       "1.0.0",                                            │
   │   })                                                                   │
   └────────────────────────────────────────────────────────────────────────┘
                                       │
                                       ▼
2. LEADER ELECTION
   ┌────────────────────────────────────────────────────────────────────────┐
   │ Multiple pods compete for leadership                                   │
   │ Winner becomes leader, others become readers                           │
   └────────────────────────────────────────────────────────────────────────┘
                                       │
                                       ▼
3. LEADER PUBLISHES ADDRESS
   ┌────────────────────────────────────────────────────────────────────────┐
   │ Only leader updates TinyModule.Status:                                 │
   │                                                                        │
   │   instance.Status.Addr = "my-module-v1:50051"                          │
   │   instance.Status.Version = "1.0.0"                                    │
   │   instance.Status.Components = [                                       │
   │     {Name: "mycomponent", Info: {...}},                                │
   │   ]                                                                    │
   │   r.Status().Update(ctx, instance)                                     │
   └────────────────────────────────────────────────────────────────────────┘
                                       │
                                       ▼
4. OTHER MODULES DISCOVER
   ┌────────────────────────────────────────────────────────────────────────┐
   │ TinyModuleReconciler watches ALL TinyModule CRs                        │
   │                                                                        │
   │ For each remote module:                                                │
   │   if instance.Status.Addr != "" {                                      │
   │     r.ClientPool.Register(req.Name, instance.Status.Addr)              │
   │   }                                                                    │
   └────────────────────────────────────────────────────────────────────────┘
                                       │
                                       ▼
5. READY FOR COMMUNICATION
   ┌────────────────────────────────────────────────────────────────────────┐
   │ ClientPool has gRPC connections to all modules                         │
   │ Cross-module messages can now be routed                                │
   └────────────────────────────────────────────────────────────────────────┘
```
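Step 2 relies on Kubernetes leader election, and the reconciler below only consults an IsLeader flag. A minimal sketch of how that flag could be driven by controller-runtime's built-in election (the election ID and namespace below are assumptions, not taken from the project):

```go
// Sketch only: wire controller-runtime leader election to the IsLeader flag
// used by the reconciler below.
package main

import (
    "sync/atomic"

    ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        LeaderElection:          true,
        LeaderElectionID:        "my-module-v1-leader", // hypothetical lock name
        LeaderElectionNamespace: "tinysystems",
    })
    if err != nil {
        panic(err)
    }

    var isLeader atomic.Bool
    go func() {
        <-mgr.Elected() // closed once this pod wins the election
        isLeader.Store(true)
    }()

    // The TinyModuleReconciler would be constructed with &isLeader as IsLeader.
    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        panic(err)
    }
}
```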
TinyModule Controller Implementation

```go
// tinymodule_controller.go
func (r *TinyModuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    instance := &v1alpha1.TinyModule{}
    if err := r.Get(ctx, req.NamespacedName, instance); err != nil {
        if errors.IsNotFound(err) {
            // Module removed - cleanup connections
            r.ClientPool.Unregister(req.Name)
            return ctrl.Result{}, nil
        }
        return ctrl.Result{}, err
    }

    // Is this a REMOTE module?
    if req.Name != r.Module.GetNameSanitised() {
        // Register remote module for cross-module communication
        if instance.Status.Addr != "" {
            r.ClientPool.Register(req.Name, instance.Status.Addr)
            log.Info("discovered remote module",
                "name", req.Name,
                "address", instance.Status.Addr)
        }
        return ctrl.Result{}, nil
    }

    // This is OUR module - only leader updates
    if !r.IsLeader.Load() {
        return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
    }

    // Leader: publish our address and components
    instance.Status.Addr = r.Module.Addr
    instance.Status.Version = r.Module.Version
    instance.Status.Components = r.buildComponentStatus()
    if err := r.Status().Update(ctx, instance); err != nil {
        return ctrl.Result{RequeueAfter: 5 * time.Second}, err
    }
    return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
}
```
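For the reconciler to receive events for every TinyModule CR, it has to be registered for the whole type. A registration sketch assuming the standard controller-runtime builder (this wiring is not shown in the source):

```go
// Assumed wiring, not taken from the repository: register the reconciler so it
// is triggered for all TinyModule CRs visible to the manager's cache.
func (r *TinyModuleReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.TinyModule{}).
        Complete(r)
}
```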
Client Pool

The ClientPool manages gRPC connections:
```go
type Pool struct {
    connections map[string]*grpc.ClientConn
    mu          sync.RWMutex
}

func (p *Pool) Register(moduleName, address string) {
    p.mu.Lock()
    defer p.mu.Unlock()

    // Already registered?
    if conn, exists := p.connections[moduleName]; exists {
        if conn.Target() == address {
            return // Same address, skip
        }
        // Different address - reconnect
        conn.Close()
    }

    // Create new connection
    conn, err := grpc.Dial(address,
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithKeepaliveParams(keepalive.ClientParameters{
            Time:                10 * time.Second,
            Timeout:             3 * time.Second,
            PermitWithoutStream: true,
        }),
    )
    if err != nil {
        log.Error("failed to connect to module",
            "module", moduleName,
            "address", address,
            "error", err)
        return
    }
    p.connections[moduleName] = conn
    log.Info("connected to module", "module", moduleName, "address", address)
}

func (p *Pool) Get(moduleName string) (*grpc.ClientConn, bool) {
    p.mu.RLock()
    defer p.mu.RUnlock()
    conn, ok := p.connections[moduleName]
    return conn, ok
}

func (p *Pool) Unregister(moduleName string) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if conn, exists := p.connections[moduleName]; exists {
        conn.Close()
        delete(p.connections, moduleName)
    }
}
```
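Once discovery has populated the pool, callers look up a connection by module name and build a gRPC client from it. A hypothetical usage sketch (the sendPing helper and the destination string are invented for illustration; pb is the generated gRPC package used later on this page):

```go
// Hypothetical helper, for illustration only.
func sendPing(ctx context.Context, pool *Pool) error {
    conn, ok := pool.Get("http-module-v1")
    if !ok {
        return fmt.Errorf("http-module-v1 not discovered yet")
    }
    client := pb.NewModuleServiceClient(conn)
    // "server.ping" is an invented destination in "node-name.port-name" form.
    _, err := client.Send(ctx, &pb.Message{To: "server.ping", From: "local"})
    return err
}
```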
Service Discovery vs DNS

TinySystems uses CR-based discovery rather than Kubernetes DNS:
| Aspect | CR-Based | DNS-Based |
|---|---|---|
| Updates | Real-time via watch | Cached, delayed |
| Metadata | Components, version | Just address |
| Leader awareness | Only leader publishes | N/A |
| Custom data | Extensible status | Fixed format |
Kubernetes Service
The gRPC address published in the status typically points to a Kubernetes Service in front of the module's pods:
```yaml
apiVersion: v1
kind: Service
metadata:
  name: common-module-v1
  namespace: tinysystems
spec:
  type: ClusterIP
  selector:
    app: common-module
  ports:
    - name: grpc
      port: 50051
      targetPort: 50051
```
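The address a leader publishes only needs to resolve to this Service. A small sketch of how that address could be composed (the helper below is an assumption, not taken from the project):

```go
// Hypothetical helper: the published address is just the Service name plus
// the gRPC port, e.g. "common-module-v1:50051".
func publishedAddr(serviceName string, grpcPort int) string {
    return fmt.Sprintf("%s:%d", serviceName, grpcPort)
}
```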
Module Naming

Module names are sanitized for Kubernetes:
```go
// github.com/tiny-systems/common-module -> common-module
// Keep the final path segment, lowercase, replace special chars, truncate to 63 chars
func SanitizeResourceName(name string) string {
    name = path.Base(name) // keep only the last path segment
    name = strings.ToLower(name)
    name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-")
    if len(name) > 63 {
        name = name[:63]
    }
    return strings.Trim(name, "-")
}
```

Cross-Module Message Routing
When the scheduler receives a message for a remote module:
```go
func (s *Scheduler) Handle(ctx context.Context, msg runner.Msg) error {
    // Parse destination: "node-name.port-name"
    nodeName, _ := parseDestination(msg.To) // port name is handled by the receiver

    // Check local instances first
    if r, exists := s.instancesMap.Get(nodeName); exists {
        return r.MsgHandler(ctx, msg)
    }

    // Find which module owns this node
    moduleName := s.findModuleForNode(nodeName)
    if moduleName == "" {
        return fmt.Errorf("no module found for node %s", nodeName)
    }

    // Get connection from pool
    conn, ok := s.clientPool.Get(moduleName)
    if !ok {
        return fmt.Errorf("module %s not discovered", moduleName)
    }

    // Send via gRPC
    client := pb.NewModuleServiceClient(conn)
    _, err := client.Send(ctx, &pb.Message{
        To:   msg.To,
        From: msg.From,
        Data: serializeData(msg.Data),
    })
    return err
}
```
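findModuleForNode is not shown here. One plausible sketch, assuming the scheduler maintains a node-to-module index refreshed from the Components lists of discovered TinyModule CRs (the nodeToModule map and mu mutex are invented for this sketch):

```go
// Hypothetical lookup; the field names are assumptions made for this sketch.
func (s *Scheduler) findModuleForNode(nodeName string) string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.nodeToModule[nodeName] // "" when the node is unknown
}
```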
Dynamic Discovery

Modules can be added/removed at runtime:
```
1. New module deployed
   └─▶ Creates TinyModule CR
       └─▶ Other modules see watch event
           └─▶ ClientPool.Register() called
               └─▶ Cross-module communication enabled

2. Module removed
   └─▶ TinyModule CR deleted
       └─▶ Other modules see delete event
           └─▶ ClientPool.Unregister() called
               └─▶ Connections cleaned up
```

Best Practices
1. Wait for Discovery
```go
func (s *Scheduler) Handle(ctx context.Context, msg runner.Msg) error {
    nodeName, _ := parseDestination(msg.To)
    moduleName := s.findModuleForNode(nodeName)

    // Retry with backoff if module not yet discovered
    for i := 0; i < 3; i++ {
        if conn, ok := s.clientPool.Get(moduleName); ok {
            return s.sendViaGRPC(ctx, conn, msg)
        }
        time.Sleep(time.Duration(i+1) * time.Second)
    }
    return fmt.Errorf("module %s not available after retries", moduleName)
}
```

2. Handle Connection Failures
```go
func (p *Pool) Get(moduleName string) (*grpc.ClientConn, error) {
    p.mu.RLock()
    defer p.mu.RUnlock()

    conn, ok := p.connections[moduleName]
    if !ok {
        return nil, fmt.Errorf("module not discovered: %s", moduleName)
    }
    // Check connection state
    if conn.GetState() == connectivity.Shutdown {
        return nil, fmt.Errorf("connection shutdown: %s", moduleName)
    }
    return conn, nil
}
```

Next Steps
- CR-Based State Propagation - Share state across pods
- Cross-Module Communication - gRPC details
- Horizontal Scaling - Scaling patterns